diff --git a/CHANGELOG.md b/CHANGELOG.md
index f03c6a42350..e0ebf8a4e21 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,28 @@
+Release v1.44.308 (2023-07-25)
+===
+
+### Service Client Updates
+* `service/billingconductor`: Updates service API and documentation
+* `service/customer-profiles`: Updates service API and documentation
+* `service/datasync`: Updates service API and documentation
+* `service/dynamodb`: Updates service API, documentation, waiters, paginators, and examples
+  * Documentation updates for DynamoDB
+* `service/ec2`: Updates service API and documentation
+  * This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes.
+* `service/emr-serverless`: Updates service API and documentation
+* `service/lambda`: Updates service API
+  * Add Python 3.11 (python3.11) support to AWS Lambda
+* `service/rds`: Updates service API, documentation, waiters, paginators, and examples
+  * This release adds support for monitoring storage optimization progress on the DescribeDBInstances API.
+* `service/sagemaker`: Updates service API and documentation
+  * Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API
+* `service/securityhub`: Updates service API and documentation
+* `service/sts`: Updates service API and documentation
+  * API updates for the AWS Security Token Service
+* `service/transfer`: Updates service API and documentation
+  * This release adds support for SFTP Connectors.
+* `service/wisdom`: Updates service API and documentation
+
 Release v1.44.307 (2023-07-24)
 ===

diff --git a/aws/version.go b/aws/version.go
index fb6257dde09..17983f5b599 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.307"
+const SDKVersion = "1.44.308"
diff --git a/models/apis/billingconductor/2021-07-30/api-2.json b/models/apis/billingconductor/2021-07-30/api-2.json
index 7ed58690521..3c35f4c298b 100644
--- a/models/apis/billingconductor/2021-07-30/api-2.json
+++ b/models/apis/billingconductor/2021-07-30/api-2.json
@@ -605,7 +605,8 @@
       "type":"structure",
       "required":["LinkedAccountIds"],
       "members":{
-        "LinkedAccountIds":{"shape":"AccountIdList"}
+        "LinkedAccountIds":{"shape":"AccountIdList"},
+        "AutoAssociate":{"shape":"Boolean"}
       }
     },
     "AccountId":{
@@ -793,7 +794,8 @@
         "CreationTime":{"shape":"Instant"},
         "LastModifiedTime":{"shape":"Instant"},
         "Status":{"shape":"BillingGroupStatus"},
-        "StatusReason":{"shape":"BillingGroupStatusReason"}
+        "StatusReason":{"shape":"BillingGroupStatusReason"},
+        "AccountGrouping":{"shape":"ListBillingGroupAccountGrouping"}
       }
     },
     "BillingGroupName":{
@@ -821,6 +823,10 @@
       "type":"string",
       "pattern":"\\d{4}-(0?[1-9]|1[012])"
     },
+    "Boolean":{
+      "type":"boolean",
+      "box":true
+    },
     "ClientToken":{
       "type":"string",
       "max":64,
@@ -1296,6 +1302,12 @@
         "NextToken":{"shape":"Token"}
       }
     },
+    "ListBillingGroupAccountGrouping":{
+      "type":"structure",
+      "members":{
+        "AutoAssociate":{"shape":"Boolean"}
+      }
+    },
     "ListBillingGroupCostReportsFilter":{
       "type":"structure",
       "members":{
@@ -1323,7 +1335,8 @@
       "members":{
         "Arns":{"shape":"BillingGroupArnList"},
         "PricingPlan":{"shape":"PricingPlanFullArn"},
-        "Statuses":{"shape":"BillingGroupStatusList"}
+        "Statuses":{"shape":"BillingGroupStatusList"},
+        "AutoAssociate":{"shape":"Boolean"}
       }
     },
     "ListBillingGroupsInput":{
@@ -1870,6 +1883,12 @@
       "members":{
       }
     },
+    "UpdateBillingGroupAccountGrouping":{
+      "type":"structure",
+      "members":{
+        "AutoAssociate":{"shape":"Boolean"}
+      }
+    },
     "UpdateBillingGroupInput":{
       "type":"structure",
       "required":["Arn"],
@@ -1878,7 +1897,8 @@
         "Name":{"shape":"BillingGroupName"},
         "Status":{"shape":"BillingGroupStatus"},
         "ComputationPreference":{"shape":"ComputationPreference"},
-        "Description":{"shape":"BillingGroupDescription"}
+        "Description":{"shape":"BillingGroupDescription"},
+        "AccountGrouping":{"shape":"UpdateBillingGroupAccountGrouping"}
       }
     },
     "UpdateBillingGroupOutput":{
@@ -1892,7 +1912,8 @@
         "Size":{"shape":"NumberOfAccounts"},
         "LastModifiedTime":{"shape":"Instant"},
         "Status":{"shape":"BillingGroupStatus"},
-        "StatusReason":{"shape":"BillingGroupStatusReason"}
+        "StatusReason":{"shape":"BillingGroupStatusReason"},
+        "AccountGrouping":{"shape":"UpdateBillingGroupAccountGrouping"}
       }
     },
     "UpdateCustomLineItemChargeDetails":{
@@ -2096,7 +2117,9 @@
         "ILLEGAL_OPERATION",
         "ILLEGAL_USAGE_TYPE",
         "INVALID_SKU_COMBO",
-        "INVALID_FILTER"
+        "INVALID_FILTER",
+        "TOO_MANY_AUTO_ASSOCIATE_BILLING_GROUPS",
+        "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP"
       ]
     }
   }
diff --git a/models/apis/billingconductor/2021-07-30/docs-2.json b/models/apis/billingconductor/2021-07-30/docs-2.json
index 7421e1b55d3..87a4e6777c8 100644
--- a/models/apis/billingconductor/2021-07-30/docs-2.json
+++ b/models/apis/billingconductor/2021-07-30/docs-2.json
@@ -38,7 +38,7 @@
     "AWSCost": {
       "base": null,
       "refs": {
-        "BillingGroupCostReportElement$AWSCost": "
The actual Amazon Web Services charges for the billing group.
" + "BillingGroupCostReportElement$AWSCost": "The actual Amazon Web Services charges for the billing group.
" } }, "AccessDeniedException": { @@ -53,7 +53,7 @@ } }, "AccountAssociationsListElement": { - "base": "A representation of a linked account.
", + "base": "A representation of a linked account.
", "refs": { "AccountAssociationsList$member": null } @@ -61,24 +61,24 @@ "AccountEmail": { "base": null, "refs": { - "AccountAssociationsListElement$AccountEmail": "The Amazon Web Services account email.
" + "AccountAssociationsListElement$AccountEmail": "The Amazon Web Services account email.
" } }, "AccountGrouping": { - "base": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated family.
", + "base": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.
", "refs": { - "CreateBillingGroupInput$AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated family.
" + "CreateBillingGroupInput$AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.
" } }, "AccountId": { "base": null, "refs": { - "AccountAssociationsListElement$AccountId": "The associating array of account IDs.
", + "AccountAssociationsListElement$AccountId": "The associating array of account IDs.
", "AccountIdFilterList$member": null, "AccountIdList$member": null, - "BillingGroupListElement$PrimaryAccountId": "The account ID that serves as the main account in a billing group.
", + "BillingGroupListElement$PrimaryAccountId": "The account ID that serves as the main account in a billing group.
", "CreateBillingGroupInput$PrimaryAccountId": "The account ID that serves as the main account in a billing group.
", - "ListAccountAssociationsFilter$AccountId": "The Amazon Web Services account ID to filter on.
", + "ListAccountAssociationsFilter$AccountId": "The Amazon Web Services account ID to filter on.
", "UpdateBillingGroupOutput$PrimaryAccountId": "The account ID that serves as the main account in a billing group.
" } }, @@ -91,7 +91,7 @@ "AccountIdList": { "base": null, "refs": { - "AccountGrouping$LinkedAccountIds": "The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.
", + "AccountGrouping$LinkedAccountIds": "The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.
", "AssociateAccountsInput$AccountIds": "The associating array of account IDs.
", "DisassociateAccountsInput$AccountIds": "The array of account IDs to disassociate.
" } @@ -99,7 +99,7 @@ "AccountName": { "base": null, "refs": { - "AccountAssociationsListElement$AccountName": "The Amazon Web Services account name.
" + "AccountAssociationsListElement$AccountName": "The Amazon Web Services account name.
" } }, "Arn": { @@ -131,20 +131,20 @@ } }, "AssociateResourceError": { - "base": "A representation of a resource association error.
", + "base": "A representation of a resource association error.
", "refs": { - "AssociateResourceResponseElement$Error": " An AssociateResourceError
that will populate if the resource association fails.
An AssociateResourceError
that will populate if the resource association fails.
An AssociateResourceError
that's shown if the resource disassociation fails.
A static error code that's used to classify the type of failure.
" + "AssociateResourceError$Reason": "A static error code that's used to classify the type of failure.
" } }, "AssociateResourceResponseElement": { - "base": "A resource association result for a percentage custom line item.
", + "base": "A resource association result for a percentage custom line item.
", "refs": { "AssociateResourcesResponseList$member": null } @@ -159,7 +159,7 @@ "Association": { "base": null, "refs": { - "ListAccountAssociationsFilter$Association": " MONITORED
: linked accounts that are associated to billing groups.
UNMONITORED
: linked accounts that are not associated to billing groups.
Billing Group Arn
: linked accounts that are associated to the provided Billing Group Arn.
MONITORED
: linked accounts that are associated to billing groups.
UNMONITORED
: linked accounts that are not associated to billing groups.
Billing Group Arn
: linked accounts that are associated to the provided Billing Group Arn.
The Billing Group Arn that the linked account is associated to.
", + "AccountAssociationsListElement$BillingGroupArn": "The Billing Group Arn that the linked account is associated to.
", "AssociateAccountsInput$Arn": "The Amazon Resource Name (ARN) of the billing group that associates the array of account IDs.
", "AssociateAccountsOutput$Arn": "The Amazon Resource Name (ARN) of the billing group that associates the array of account IDs.
", "BillingGroupArnList$member": null, - "BillingGroupCostReportElement$Arn": "The Amazon Resource Name (ARN) of a billing group.
", - "BillingGroupListElement$Arn": "The Amazon Resource Number (ARN) that can be used to uniquely identify the billing group.
", + "BillingGroupCostReportElement$Arn": "The Amazon Resource Name (ARN) of a billing group.
", + "BillingGroupListElement$Arn": "The Amazon Resource Number (ARN) that can be used to uniquely identify the billing group.
", "CreateBillingGroupOutput$Arn": "The Amazon Resource Name (ARN) of the created billing group.
", "CreateCustomLineItemInput$BillingGroupArn": "The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.
", - "CustomLineItemListElement$BillingGroupArn": "The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.
", + "CustomLineItemListElement$BillingGroupArn": "The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.
", "CustomLineItemVersionListElement$BillingGroupArn": "The Amazon Resource Name (ARN) of the billing group that the custom line item applies to.
", "DeleteBillingGroupInput$Arn": "The Amazon Resource Name (ARN) of the billing group that you're deleting.
", "DeleteBillingGroupOutput$Arn": "The Amazon Resource Name (ARN) of the deleted billing group.
", @@ -215,12 +215,12 @@ "base": null, "refs": { "ListBillingGroupCostReportsFilter$BillingGroupArns": "The list of Amazon Resource Names (ARNs) used to filter billing groups to retrieve reports.
", - "ListBillingGroupsFilter$Arns": "The list of billing group Amazon Resource Names (ARNs) to retrieve information.
", - "ListCustomLineItemsFilter$BillingGroups": "The billing group Amazon Resource Names (ARNs) to retrieve information.
" + "ListBillingGroupsFilter$Arns": "The list of billing group Amazon Resource Names (ARNs) to retrieve information.
", + "ListCustomLineItemsFilter$BillingGroups": "The billing group Amazon Resource Names (ARNs) to retrieve information.
" } }, "BillingGroupCostReportElement": { - "base": "A summary report of actual Amazon Web Services charges and calculated Amazon Web Services charges, based on the associated pricing plan of a billing group.
", + "base": "A summary report of actual Amazon Web Services charges and calculated Amazon Web Services charges, based on the associated pricing plan of a billing group.
", "refs": { "BillingGroupCostReportList$member": null } @@ -234,7 +234,7 @@ "BillingGroupDescription": { "base": null, "refs": { - "BillingGroupListElement$Description": "The description of the billing group.
", + "BillingGroupListElement$Description": "The description of the billing group.
", "CreateBillingGroupInput$Description": "The description of the billing group.
", "UpdateBillingGroupInput$Description": "A description of the billing group.
", "UpdateBillingGroupOutput$Description": "A description of the billing group.
" @@ -253,7 +253,7 @@ } }, "BillingGroupListElement": { - "base": "A representation of a billing group.
", + "base": "A representation of a billing group.
", "refs": { "BillingGroupList$member": null } @@ -261,7 +261,7 @@ "BillingGroupName": { "base": null, "refs": { - "BillingGroupListElement$Name": "The name of the billing group.
", + "BillingGroupListElement$Name": "The name of the billing group.
", "CreateBillingGroupInput$Name": "The billing group name. The names must be unique.
", "UpdateBillingGroupInput$Name": "The name of the billing group. The names must be unique to each billing group.
", "UpdateBillingGroupOutput$Name": "The name of the billing group. The names must be unique to each billing group.
" @@ -270,7 +270,7 @@ "BillingGroupStatus": { "base": null, "refs": { - "BillingGroupListElement$Status": "The billing group status. Only one of the valid values can be used.
", + "BillingGroupListElement$Status": "The billing group status. Only one of the valid values can be used.
", "BillingGroupStatusList$member": null, "UpdateBillingGroupInput$Status": "The status of the billing group. Only one of the valid values can be used.
", "UpdateBillingGroupOutput$Status": "The status of the billing group. Only one of the valid values can be used.
" @@ -285,15 +285,15 @@ "BillingGroupStatusReason": { "base": null, "refs": { - "BillingGroupListElement$StatusReason": "The reason why the billing group is in its current status.
", + "BillingGroupListElement$StatusReason": "The reason why the billing group is in its current status.
", "UpdateBillingGroupOutput$StatusReason": "The reason why the billing group is in its current status.
" } }, "BillingPeriod": { "base": null, "refs": { - "CustomLineItemBillingPeriodRange$InclusiveStartBillingPeriod": "The inclusive start billing period that defines a billing period range where a custom line is applied.
", - "CustomLineItemBillingPeriodRange$ExclusiveEndBillingPeriod": "The inclusive end billing period that defines a billing period range where a custom line is applied.
", + "CustomLineItemBillingPeriodRange$InclusiveStartBillingPeriod": "The inclusive start billing period that defines a billing period range where a custom line is applied.
", + "CustomLineItemBillingPeriodRange$ExclusiveEndBillingPeriod": "The inclusive end billing period that defines a billing period range where a custom line is applied.
", "CustomLineItemVersionListElement$StartBillingPeriod": "The start billing period of the custom line item version.
", "CustomLineItemVersionListElement$EndBillingPeriod": "The end billing period of the custom line item version.
", "ListAccountAssociationsInput$BillingPeriod": "The preferred billing period to get account associations.
", @@ -314,6 +314,15 @@ "ListResourcesAssociatedToCustomLineItemResponseElement$EndBillingPeriod": "The end billing period of the associated resource.
" } }, + "Boolean": { + "base": null, + "refs": { + "AccountGrouping$AutoAssociate": "Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
", + "ListBillingGroupAccountGrouping$AutoAssociate": "Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
", + "ListBillingGroupsFilter$AutoAssociate": "Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
", + "UpdateBillingGroupAccountGrouping$AutoAssociate": "Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
" + } + }, "ClientToken": { "base": null, "refs": { @@ -324,7 +333,7 @@ } }, "ComputationPreference": { - "base": "The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.
", + "base": "The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.
", "refs": { "BillingGroupListElement$ComputationPreference": null, "CreateBillingGroupInput$ComputationPreference": "The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.
", @@ -397,13 +406,13 @@ "Currency": { "base": null, "refs": { - "BillingGroupCostReportElement$Currency": "The displayed currency.
" + "BillingGroupCostReportElement$Currency": "The displayed currency.
" } }, "CurrencyCode": { "base": null, "refs": { - "CustomLineItemListElement$CurrencyCode": "The custom line item's charge value currency. Only one of the valid values can be used.
", + "CustomLineItemListElement$CurrencyCode": "The custom line item's charge value currency. Only one of the valid values can be used.
", "CustomLineItemVersionListElement$CurrencyCode": "The charge value currency of the custom line item.
" } }, @@ -414,7 +423,7 @@ "BatchDisassociateResourcesFromCustomLineItemInput$TargetArn": "A percentage custom line item ARN to disassociate the resources from.
", "CreateCustomLineItemOutput$Arn": "The Amazon Resource Name (ARN) of the created custom line item.
", "CustomLineItemArns$member": null, - "CustomLineItemListElement$Arn": "The Amazon Resource Names (ARNs) for custom line items.
", + "CustomLineItemListElement$Arn": "The Amazon Resource Names (ARNs) for custom line items.
", "CustomLineItemVersionListElement$Arn": "A list of custom line item Amazon Resource Names (ARNs) to retrieve information.
", "DeleteCustomLineItemInput$Arn": "The ARN of the custom line item to be deleted.
", "DeleteCustomLineItemOutput$Arn": "Then ARN of the deleted custom line item.
", @@ -428,24 +437,24 @@ "CustomLineItemArns": { "base": null, "refs": { - "ListCustomLineItemsFilter$Arns": "A list of custom line item ARNs to retrieve information.
" + "ListCustomLineItemsFilter$Arns": "A list of custom line item ARNs to retrieve information.
" } }, "CustomLineItemAssociationElement": { "base": null, "refs": { - "AssociateResourceResponseElement$Arn": "The resource ARN that was associated to the custom line item.
", + "AssociateResourceResponseElement$Arn": "The resource ARN that was associated to the custom line item.
", "CustomLineItemAssociationsList$member": null, "CustomLineItemBatchAssociationsList$member": null, "CustomLineItemBatchDisassociationsList$member": null, - "DisassociateResourceResponseElement$Arn": "The resource ARN that was disassociated from the custom line item.
", + "DisassociateResourceResponseElement$Arn": "The resource ARN that was disassociated from the custom line item.
", "ListResourcesAssociatedToCustomLineItemResponseElement$Arn": "The ARN of the associated resource.
" } }, "CustomLineItemAssociationsList": { "base": null, "refs": { - "CustomLineItemPercentageChargeDetails$AssociatedValues": "A list of resource ARNs to associate to the percentage custom line item.
" + "CustomLineItemPercentageChargeDetails$AssociatedValues": "A list of resource ARNs to associate to the percentage custom line item.
" } }, "CustomLineItemBatchAssociationsList": { @@ -461,7 +470,7 @@ } }, "CustomLineItemBillingPeriodRange": { - "base": "The billing period range in which the custom line item request will be applied.
", + "base": "The billing period range in which the custom line item request will be applied.
", "refs": { "BatchAssociateResourcesToCustomLineItemInput$BillingPeriodRange": null, "BatchDisassociateResourcesFromCustomLineItemInput$BillingPeriodRange": null, @@ -471,7 +480,7 @@ } }, "CustomLineItemChargeDetails": { - "base": " The charge details of a custom line item. It should contain only one of Flat
or Percentage
.
The charge details of a custom line item. It should contain only one of Flat
or Percentage
.
A CustomLineItemChargeDetails
that describes the charge details for a custom line item.
The custom line item's fixed charge value in USD.
", + "CustomLineItemFlatChargeDetails$ChargeValue": "The custom line item's fixed charge value in USD.
", "ListCustomLineItemFlatChargeDetails$ChargeValue": "The custom line item's fixed charge value in USD.
", "UpdateCustomLineItemFlatChargeDetails$ChargeValue": "The custom line item's new fixed charge value in USD.
" } @@ -488,16 +497,16 @@ "base": null, "refs": { "CreateCustomLineItemInput$Description": "The description of the custom line item. This is shown on the Bills page in association with the charge value.
", - "CustomLineItemListElement$Description": "The custom line item's description. This is shown on the Bills page in association with the charge value.
", + "CustomLineItemListElement$Description": "The custom line item's description. This is shown on the Bills page in association with the charge value.
", "CustomLineItemVersionListElement$Description": "The description of the custom line item.
", "UpdateCustomLineItemInput$Description": "The new line item description of the custom line item.
", "UpdateCustomLineItemOutput$Description": "The description of the successfully updated custom line item.
" } }, "CustomLineItemFlatChargeDetails": { - "base": "A representation of the charge details that are associated with a flat custom line item.
", + "base": "A representation of the charge details that are associated with a flat custom line item.
", "refs": { - "CustomLineItemChargeDetails$Flat": " A CustomLineItemFlatChargeDetails
that describes the charge details of a flat custom line item.
A CustomLineItemFlatChargeDetails
that describes the charge details of a flat custom line item.
A representation of a custom line item.
", + "base": "A representation of a custom line item.
", "refs": { "CustomLineItemList$member": null } @@ -516,7 +525,7 @@ "base": null, "refs": { "CreateCustomLineItemInput$Name": "The name of the custom line item.
", - "CustomLineItemListElement$Name": "The custom line item's name.
", + "CustomLineItemListElement$Name": "The custom line item's name.
", "CustomLineItemNameList$member": null, "CustomLineItemVersionListElement$Name": "The name of the custom line item.
", "UpdateCustomLineItemInput$Name": "The new name for the custom line item.
", @@ -526,19 +535,19 @@ "CustomLineItemNameList": { "base": null, "refs": { - "ListCustomLineItemsFilter$Names": "A list of custom line items to retrieve information.
" + "ListCustomLineItemsFilter$Names": "A list of custom line items to retrieve information.
" } }, "CustomLineItemPercentageChargeDetails": { - "base": "A representation of the charge details that are associated with a percentage custom line item.
", + "base": "A representation of the charge details that are associated with a percentage custom line item.
", "refs": { - "CustomLineItemChargeDetails$Percentage": " A CustomLineItemPercentageChargeDetails
that describes the charge details of a percentage custom line item.
A CustomLineItemPercentageChargeDetails
that describes the charge details of a percentage custom line item.
The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.
", + "CustomLineItemPercentageChargeDetails$PercentageValue": "The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.
", "ListCustomLineItemPercentageChargeDetails$PercentageValue": "The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.
", "UpdateCustomLineItemPercentageChargeDetails$PercentageValue": "The custom line item's new percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.
" } @@ -546,7 +555,7 @@ "CustomLineItemProductCode": { "base": null, "refs": { - "CustomLineItemListElement$ProductCode": "The product code that's associated with the custom line item.
", + "CustomLineItemListElement$ProductCode": "The product code that's associated with the custom line item.
", "CustomLineItemVersionListElement$ProductCode": "The product code that’s associated with the custom line item.
" } }, @@ -560,7 +569,7 @@ "CustomLineItemType": { "base": null, "refs": { - "CustomLineItemChargeDetails$Type": "The type of the custom line item that indicates whether the charge is a fee or credit.
", + "CustomLineItemChargeDetails$Type": "The type of the custom line item that indicates whether the charge is a fee or credit.
", "ListCustomLineItemChargeDetails$Type": " The type of the custom line item that indicates whether the charge is a fee
or credit
.
A resource disassociation result for a percentage custom line item.
", + "base": "A resource disassociation result for a percentage custom line item.
", "refs": { "DisassociateResourcesResponseList$member": null } @@ -658,17 +667,17 @@ "Instant": { "base": null, "refs": { - "BillingGroupListElement$CreationTime": "The time when the billing group was created.
", - "BillingGroupListElement$LastModifiedTime": "The most recent time when the billing group was modified.
", - "CustomLineItemListElement$CreationTime": "The time created.
", - "CustomLineItemListElement$LastModifiedTime": "The most recent time when the custom line item was modified.
", + "BillingGroupListElement$CreationTime": "The time when the billing group was created.
", + "BillingGroupListElement$LastModifiedTime": "The most recent time when the billing group was modified.
", + "CustomLineItemListElement$CreationTime": "The time created.
", + "CustomLineItemListElement$LastModifiedTime": "The most recent time when the custom line item was modified.
", "CustomLineItemVersionListElement$CreationTime": "The time when the custom line item version was created.
", "CustomLineItemVersionListElement$LastModifiedTime": "The most recent time that the custom line item version was modified.
", "CustomLineItemVersionListElement$StartTime": "The inclusive start time.
", - "PricingPlanListElement$CreationTime": "The time when the pricing plan was created.
", - "PricingPlanListElement$LastModifiedTime": "The most recent time when the pricing plan was modified.
", - "PricingRuleListElement$CreationTime": "The time when the pricing rule was created.
", - "PricingRuleListElement$LastModifiedTime": "The most recent time when the pricing rule was modified.
", + "PricingPlanListElement$CreationTime": "The time when the pricing plan was created.
", + "PricingPlanListElement$LastModifiedTime": "The most recent time when the pricing plan was modified.
", + "PricingRuleListElement$CreationTime": "The time when the pricing rule was created.
", + "PricingRuleListElement$LastModifiedTime": "The most recent time when the pricing rule was modified.
", "UpdateBillingGroupOutput$LastModifiedTime": "The most recent time when the billing group was modified.
", "UpdateCustomLineItemOutput$LastModifiedTime": "The most recent time when the custom line item was modified.
", "UpdatePricingPlanOutput$LastModifiedTime": "The most recent time when the pricing plan was modified.
", @@ -681,7 +690,7 @@ } }, "ListAccountAssociationsFilter": { - "base": "The filter on the account ID of the linked account, or any of the following:
MONITORED
: linked accounts that are associated to billing groups.
UNMONITORED
: linked accounts that are not associated to billing groups.
Billing Group Arn
: linked accounts that are associated to the provided Billing Group Arn.
The filter on the account ID of the linked account, or any of the following:
MONITORED
: linked accounts that are associated to billing groups.
UNMONITORED
: linked accounts that are not associated to billing groups.
Billing Group Arn
: linked accounts that are associated to the provided Billing Group Arn.
The filter on the account ID of the linked account, or any of the following:
MONITORED
: linked accounts that are associated to billing groups.
UNMONITORED
: linked accounts that aren't associated to billing groups.
Billing Group Arn
: linked accounts that are associated to the provided billing group Arn.
Specifies if the billing group has the following features enabled.
", + "refs": { + "BillingGroupListElement$AccountGrouping": "Specifies if the billing group has automatic account association (AutoAssociate
) enabled.
The filter used to retrieve specific BillingGroupCostReportElements
.
The filter used to retrieve specific BillingGroupCostReportElements
.
A ListBillingGroupCostReportsFilter
to specify billing groups to retrieve reports from.
The filter that specifies the billing groups and pricing plans to retrieve billing group information.
", + "base": "The filter that specifies the billing groups and pricing plans to retrieve billing group information.
", "refs": { "ListBillingGroupsInput$Filters": "A ListBillingGroupsFilter
that specifies the billing group and pricing plan to retrieve billing group information.
A representation of the charge details of a custom line item.
", "refs": { - "CustomLineItemListElement$ChargeDetails": " A ListCustomLineItemChargeDetails
that describes the charge details of a custom line item.
A ListCustomLineItemChargeDetails
that describes the charge details of a custom line item.
A ListCustomLineItemChargeDetails
containing the charge details of the successfully updated custom line item.
A filter that specifies the custom line items and billing groups to retrieve FFLI information.
", + "base": "A filter that specifies the custom line items and billing groups to retrieve FFLI information.
", "refs": { "ListCustomLineItemsInput$Filters": "A ListCustomLineItemsFilter
that specifies the custom line item names and/or billing group Amazon Resource Names (ARNs) to retrieve FFLI information.
The filter that specifies the Amazon Resource Names (ARNs) of pricing plans, to retrieve pricing plan information.
", + "base": "The filter that specifies the Amazon Resource Names (ARNs) of pricing plans, to retrieve pricing plan information.
", "refs": { "ListPricingPlansInput$Filters": "A ListPricingPlansFilter
that specifies the Amazon Resource Name (ARNs) of pricing plans to retrieve pricing plans information.
The filter that specifies criteria that the pricing rules returned by the ListPricingRules
API will adhere to.
The filter that specifies criteria that the pricing rules returned by the ListPricingRules
API will adhere to.
A DescribePricingRuleFilter
that specifies the Amazon Resource Name (ARNs) of pricing rules to retrieve pricing rules information.
The billing group margin.
" + "BillingGroupCostReportElement$Margin": "The billing group margin.
" } }, "MarginPercentage": { "base": null, "refs": { - "BillingGroupCostReportElement$MarginPercentage": "The percentage of billing group margin.
" + "BillingGroupCostReportElement$MarginPercentage": "The percentage of billing group margin.
" } }, "MaxBillingGroupResults": { @@ -921,7 +936,7 @@ "base": null, "refs": { "CreatePricingRuleInput$ModifierPercentage": "A percentage modifier that's applied on the public pricing rates.
", - "PricingRuleListElement$ModifierPercentage": "A percentage modifier applied on the public pricing rates.
", + "PricingRuleListElement$ModifierPercentage": "A percentage modifier applied on the public pricing rates.
", "UpdatePricingRuleInput$ModifierPercentage": "The new modifier to show pricing plan rates as a percentage.
", "UpdatePricingRuleOutput$ModifierPercentage": "The new modifier to show pricing plan rates as a percentage.
" } @@ -929,21 +944,21 @@ "NumberOfAccounts": { "base": null, "refs": { - "BillingGroupListElement$Size": "The number of accounts in the particular billing group.
", + "BillingGroupListElement$Size": "The number of accounts in the particular billing group.
", "UpdateBillingGroupOutput$Size": "The number of accounts in the particular billing group.
" } }, "NumberOfAssociatedPricingRules": { "base": null, "refs": { - "PricingPlanListElement$Size": "The pricing rules count that's currently associated with this pricing plan list element.
", + "PricingPlanListElement$Size": "The pricing rules count that's currently associated with this pricing plan list element.
", "UpdatePricingPlanOutput$Size": "The pricing rules count that's currently associated with this pricing plan list.
" } }, "NumberOfAssociations": { "base": null, "refs": { - "CustomLineItemListElement$AssociationSize": "The number of resources that are associated to the custom line item.
", + "CustomLineItemListElement$AssociationSize": "The number of resources that are associated to the custom line item.
", "CustomLineItemVersionListElement$AssociationSize": "The number of resources that are associated with the custom line item.
", "UpdateCustomLineItemOutput$AssociationSize": "The number of resources that are associated to the custom line item.
" } @@ -951,7 +966,7 @@ "NumberOfPricingPlansAssociatedWith": { "base": null, "refs": { - "PricingRuleListElement$AssociatedPricingPlanCount": "The pricing plans count that this pricing rule is associated with.
", + "PricingRuleListElement$AssociatedPricingPlanCount": "The pricing plans count that this pricing rule is associated with.
", "UpdatePricingRuleOutput$AssociatedPricingPlanCount": "The pricing plans count that this pricing rule is associated with.
" } }, @@ -976,7 +991,7 @@ "ListPricingRulesAssociatedToPricingPlanInput$PricingPlanArn": "The Amazon Resource Name (ARN) of the pricing plan for which associations are to be listed.
", "ListPricingRulesAssociatedToPricingPlanOutput$PricingPlanArn": "The Amazon Resource Name (ARN) of the pricing plan for which associations are listed.
", "PricingPlanArns$member": null, - "PricingPlanListElement$Arn": "The pricing plan Amazon Resource Names (ARN). This can be used to uniquely identify a pricing plan.
", + "PricingPlanListElement$Arn": "The pricing plan Amazon Resource Names (ARN). This can be used to uniquely identify a pricing plan.
", "UpdateBillingGroupOutput$PricingPlanArn": "The Amazon Resource Name (ARN) of the pricing plan to compute Amazon Web Services charges for the billing group.
", "UpdatePricingPlanInput$Arn": "The Amazon Resource Name (ARN) of the pricing plan that you're updating.
", "UpdatePricingPlanOutput$Arn": "The Amazon Resource Name (ARN) of the updated pricing plan.
" @@ -986,14 +1001,14 @@ "base": null, "refs": { "ListPricingPlansAssociatedWithPricingRuleOutput$PricingPlanArns": "The list containing pricing plans that are associated with the requested pricing rule.
", - "ListPricingPlansFilter$Arns": "A list of pricing plan Amazon Resource Names (ARNs) to retrieve information.
" + "ListPricingPlansFilter$Arns": "A list of pricing plan Amazon Resource Names (ARNs) to retrieve information.
" } }, "PricingPlanDescription": { "base": null, "refs": { "CreatePricingPlanInput$Description": "The description of the pricing plan.
", - "PricingPlanListElement$Description": "The pricing plan description.
", + "PricingPlanListElement$Description": "The pricing plan description.
", "UpdatePricingPlanInput$Description": "The description of the pricing plan.
", "UpdatePricingPlanOutput$Description": "The new description for the pricing rule.
" } @@ -1002,7 +1017,7 @@ "base": null, "refs": { "ComputationPreference$PricingPlanArn": "The Amazon Resource Name (ARN) of the pricing plan that's used to compute the Amazon Web Services charges for a billing group.
", - "ListBillingGroupsFilter$PricingPlan": "The pricing plan Amazon Resource Names (ARNs) to retrieve information.
" + "ListBillingGroupsFilter$PricingPlan": "The pricing plan Amazon Resource Names (ARNs) to retrieve information.
" } }, "PricingPlanList": { @@ -1012,7 +1027,7 @@ } }, "PricingPlanListElement": { - "base": "A representation of a pricing plan.
", + "base": "A representation of a pricing plan.
", "refs": { "PricingPlanList$member": null } @@ -1021,7 +1036,7 @@ "base": null, "refs": { "CreatePricingPlanInput$Name": "The name of the pricing plan. The names must be unique to each pricing plan.
", - "PricingPlanListElement$Name": "The name of a pricing plan.
", + "PricingPlanListElement$Name": "The name of a pricing plan.
", "UpdatePricingPlanInput$Name": "The name of the pricing plan. The name must be unique to each pricing plan.
", "UpdatePricingPlanOutput$Name": "The name of the pricing plan. The name must be unique to each pricing plan.
" } @@ -1037,7 +1052,7 @@ "PricingRuleArns$member": null, "PricingRuleArnsInput$member": null, "PricingRuleArnsNonEmptyInput$member": null, - "PricingRuleListElement$Arn": "The Amazon Resource Name (ARN) used to uniquely identify a pricing rule.
", + "PricingRuleListElement$Arn": "The Amazon Resource Name (ARN) used to uniquely identify a pricing rule.
", "UpdatePricingRuleInput$Arn": "The Amazon Resource Name (ARN) of the pricing rule to update.
", "UpdatePricingRuleOutput$Arn": "The Amazon Resource Name (ARN) of the successfully updated pricing rule.
" } @@ -1046,7 +1061,7 @@ "base": null, "refs": { "ListPricingRulesAssociatedToPricingPlanOutput$PricingRuleArns": "A list containing pricing rules that are associated with the requested pricing plan.
", - "ListPricingRulesFilter$Arns": "A list containing the pricing rule Amazon Resource Names (ARNs) to include in the API response.
" + "ListPricingRulesFilter$Arns": "A list containing the pricing rule Amazon Resource Names (ARNs) to include in the API response.
" } }, "PricingRuleArnsInput": { @@ -1066,7 +1081,7 @@ "base": null, "refs": { "CreatePricingRuleInput$Description": "The pricing rule description.
", - "PricingRuleListElement$Description": "The pricing rule description.
", + "PricingRuleListElement$Description": "The pricing rule description.
", "UpdatePricingRuleInput$Description": "The new description for the pricing rule.
", "UpdatePricingRuleOutput$Description": "The new description for the pricing rule.
" } @@ -1078,7 +1093,7 @@ } }, "PricingRuleListElement": { - "base": "A representation of a pricing rule.
", + "base": "A representation of a pricing rule.
", "refs": { "PricingRuleList$member": null } @@ -1087,7 +1102,7 @@ "base": null, "refs": { "CreatePricingRuleInput$Name": "The pricing rule name. The names must be unique to each pricing rule.
", - "PricingRuleListElement$Name": "The name of a pricing rule.
", + "PricingRuleListElement$Name": "The name of a pricing rule.
", "UpdatePricingRuleInput$Name": "The new name of the pricing rule. The name must be unique to each pricing rule.
", "UpdatePricingRuleOutput$Name": "The new name of the pricing rule. The name must be unique to each pricing rule.
" } @@ -1096,7 +1111,7 @@ "base": null, "refs": { "CreatePricingRuleInput$Scope": "The scope of pricing rule that indicates if it's globally applicable, or it's service-specific.
", - "PricingRuleListElement$Scope": "The scope of pricing rule that indicates if it is globally applicable, or if it is service-specific.
", + "PricingRuleListElement$Scope": "The scope of pricing rule that indicates if it is globally applicable, or if it is service-specific.
", "UpdatePricingRuleOutput$Scope": "The scope of pricing rule that indicates if it's globally applicable, or it's service-specific.
" } }, @@ -1104,7 +1119,7 @@ "base": null, "refs": { "CreatePricingRuleInput$Type": "The type of pricing rule.
", - "PricingRuleListElement$Type": "The type of pricing rule.
", + "PricingRuleListElement$Type": "The type of pricing rule.
", "UpdatePricingRuleInput$Type": "The new pricing rule type.
", "UpdatePricingRuleOutput$Type": "The new pricing rule type.
" } @@ -1112,7 +1127,7 @@ "ProformaCost": { "base": null, "refs": { - "BillingGroupCostReportElement$ProformaCost": "The hypothetical Amazon Web Services charges based on the associated pricing plan of a billing group.
" + "BillingGroupCostReportElement$ProformaCost": "The hypothetical Amazon Web Services charges based on the associated pricing plan of a billing group.
" } }, "ResourceNotFoundException": { @@ -1131,7 +1146,7 @@ "base": null, "refs": { "CreatePricingRuleInput$Service": " If the Scope
attribute is set to SERVICE
or SKU
, the attribute indicates which service the PricingRule
is applicable for.
If the Scope
attribute is SERVICE
, this attribute indicates which service the PricingRule
is applicable for.
If the Scope
attribute is SERVICE
, this attribute indicates which service the PricingRule
is applicable for.
If the Scope
attribute is set to SERVICE
, the attribute indicates which service the PricingRule
is applicable for.
The reason why the resource association failed.
", + "AssociateResourceError$Message": "The reason why the resource association failed.
", "ConflictException$Message": null, "ConflictException$ResourceId": "Identifier of the resource in use.
", "ConflictException$ResourceType": "Type of the resource in use.
", @@ -1159,8 +1174,8 @@ "ServiceLimitExceededException$ServiceCode": "The unique code for the service of the limit that is being exceeded.
", "ThrottlingException$Message": null, "ValidationException$Message": null, - "ValidationExceptionField$Name": "The field name.
", - "ValidationExceptionField$Message": "The message describing why the field failed validation.
" + "ValidationExceptionField$Name": "The field name.
", + "ValidationExceptionField$Message": "The message describing why the field failed validation.
" } }, "TagKey": { @@ -1257,6 +1272,13 @@ "refs": { } }, + "UpdateBillingGroupAccountGrouping": { + "base": "Specifies if the billing group has the following features enabled.
", + "refs": { + "UpdateBillingGroupInput$AccountGrouping": "Specifies if the billing group has automatic account association (AutoAssociate
) enabled.
Specifies if the billing group has automatic account association (AutoAssociate
) enabled.
The input doesn't match with the constraints specified by Amazon Web Services services.
", + "base": "The input doesn't match with the constraints specified by Amazon Web Services.
", "refs": { } }, "ValidationExceptionField": { - "base": "The field's information of a request that resulted in an exception.
", + "base": "The field's information of a request that resulted in an exception.
", "refs": { "ValidationExceptionFieldList$member": null } diff --git a/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json b/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json index b4ea1f3843c..a71edcaf080 100644 --- a/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json +++ b/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json @@ -138,208 +138,40 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://billingconductor.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "name": "sigv4", + "signingName": "billingconductor", + "signingRegion": "us-east-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -543,33 +375,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - 
"aws-global" - ] - } - ], - "endpoint": { - "url": "https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/models/apis/billingconductor/2021-07-30/endpoint-tests-1.json b/models/apis/billingconductor/2021-07-30/endpoint-tests-1.json index b9c1c6c3798..05fb6f36f46 100644 --- a/models/apis/billingconductor/2021-07-30/endpoint-tests-1.json +++ b/models/apis/billingconductor/2021-07-30/endpoint-tests-1.json @@ -18,8 +18,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -31,8 +31,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -44,8 +44,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -57,8 +57,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -79,8 +79,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -92,8 +92,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -105,8 +105,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -118,8 +118,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -131,8 +131,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -144,8 +144,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -157,8 +157,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -170,8 +170,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -183,8 +183,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -196,8 +207,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -209,8 +231,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -222,8 +255,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -235,8 +279,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -248,8 +292,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -261,8 +305,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -273,8 +317,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -285,10 +329,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/customer-profiles/2020-08-15/api-2.json b/models/apis/customer-profiles/2020-08-15/api-2.json index a44114d975a..64f028fbc65 100644 --- a/models/apis/customer-profiles/2020-08-15/api-2.json +++ b/models/apis/customer-profiles/2020-08-15/api-2.json @@ -414,6 +414,22 @@ {"shape":"InternalServerException"} ] }, + "GetSimilarProfiles":{ + "name":"GetSimilarProfiles", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/matches" + }, + "input":{"shape":"GetSimilarProfilesRequest"}, + "output":{"shape":"GetSimilarProfilesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ] + }, "GetWorkflow":{ "name":"GetWorkflow", "http":{ @@ -606,6 +622,22 @@ {"shape":"InternalServerException"} ] }, + "ListRuleBasedMatches":{ + "name":"ListRuleBasedMatches", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/profiles/ruleBasedMatches" + }, + "input":{"shape":"ListRuleBasedMatchesRequest"}, + "output":{"shape":"ListRuleBasedMatchesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -859,6 +891,12 @@ "PostalCode":{"shape":"string1To255"} } }, + "AddressList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":4, + "min":1 + }, "AppflowIntegration":{ "type":"structure", "required":["FlowDefinition"], @@ -939,11 +977,28 @@ "max":2, "min":1 }, + "AttributeMatchingModel":{ + "type":"string", + "enum":[ + "ONE_TO_ONE", + "MANY_TO_MANY" + ] + }, "AttributeSourceIdMap":{ "type":"map", "key":{"shape":"string1To255"}, "value":{"shape":"uuid"} }, + 
"AttributeTypesSelector":{ + "type":"structure", + "required":["AttributeMatchingModel"], + "members":{ + "AttributeMatchingModel":{"shape":"AttributeMatchingModel"}, + "Address":{"shape":"AddressList"}, + "PhoneNumber":{"shape":"PhoneNumberList"}, + "EmailAddress":{"shape":"EmailList"} + } + }, "Attributes":{ "type":"map", "key":{"shape":"string1To255"}, @@ -1103,6 +1158,7 @@ "DefaultEncryptionKey":{"shape":"encryptionKey"}, "DeadLetterQueueUrl":{"shape":"sqsQueueUrl"}, "Matching":{"shape":"MatchingRequest"}, + "RuleBasedMatching":{"shape":"RuleBasedMatchingRequest"}, "Tags":{"shape":"TagMap"} } }, @@ -1120,6 +1176,7 @@ "DefaultEncryptionKey":{"shape":"encryptionKey"}, "DeadLetterQueueUrl":{"shape":"sqsQueueUrl"}, "Matching":{"shape":"MatchingResponse"}, + "RuleBasedMatching":{"shape":"RuleBasedMatchingResponse"}, "CreatedAt":{"shape":"timestamp"}, "LastUpdatedAt":{"shape":"timestamp"}, "Tags":{"shape":"TagMap"} @@ -1487,6 +1544,12 @@ "max":1.0, "min":0.0 }, + "EmailList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":3, + "min":1 + }, "EventStreamDestinationDetails":{ "type":"structure", "required":[ @@ -1757,6 +1820,7 @@ "DeadLetterQueueUrl":{"shape":"sqsQueueUrl"}, "Stats":{"shape":"DomainStats"}, "Matching":{"shape":"MatchingResponse"}, + "RuleBasedMatching":{"shape":"RuleBasedMatchingResponse"}, "CreatedAt":{"shape":"timestamp"}, "LastUpdatedAt":{"shape":"timestamp"}, "Tags":{"shape":"TagMap"} @@ -1963,6 +2027,46 @@ "Keys":{"shape":"KeyMap"} } }, + "GetSimilarProfilesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "MatchType", + "SearchKey", + "SearchValue" + ], + "members":{ + "NextToken":{ + "shape":"token", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "location":"uri", + "locationName":"DomainName" + }, + "MatchType":{"shape":"MatchType"}, + "SearchKey":{"shape":"string1To255"}, + "SearchValue":{"shape":"string1To255"} + } + }, + "GetSimilarProfilesResponse":{ + "type":"structure", + "members":{ + "ProfileIds":{"shape":"ProfileIdList"}, + "MatchId":{"shape":"string1To255"}, + "MatchType":{"shape":"MatchType"}, + "RuleLevel":{"shape":"RuleLevel"}, + "ConfidenceScore":{"shape":"Double"}, + "NextToken":{"shape":"token"} + } + }, "GetWorkflowRequest":{ "type":"structure", "required":[ @@ -2508,6 +2612,34 @@ "NextToken":{"shape":"token"} } }, + "ListRuleBasedMatchesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "NextToken":{ + "shape":"token", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "ListRuleBasedMatchesResponse":{ + "type":"structure", + "members":{ + "MatchIds":{"shape":"MatchIdList"}, + "NextToken":{"shape":"token"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -2604,6 +2736,10 @@ "Object":{"shape":"Object"} } }, + "MatchIdList":{ + "type":"list", + "member":{"shape":"string1To255"} + }, "MatchItem":{ "type":"structure", "members":{ @@ -2612,6 +2748,13 @@ "ConfidenceScore":{"shape":"Double"} } }, + "MatchType":{ + "type":"string", + "enum":[ + "RULE_BASED_MATCHING", + "ML_BASED_MATCHING" + ] + }, "MatchesList":{ "type":"list", "member":{"shape":"MatchItem"} @@ -2647,6 +2790,35 @@ 
"ExportingConfig":{"shape":"ExportingConfig"} } }, + "MatchingRule":{ + "type":"structure", + "required":["Rule"], + "members":{ + "Rule":{"shape":"MatchingRuleAttributeList"} + } + }, + "MatchingRuleAttributeList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":15, + "min":1 + }, + "MatchingRules":{ + "type":"list", + "member":{"shape":"MatchingRule"}, + "max":15, + "min":1 + }, + "MaxAllowedRuleLevelForMatching":{ + "type":"integer", + "max":15, + "min":1 + }, + "MaxAllowedRuleLevelForMerging":{ + "type":"integer", + "max":15, + "min":1 + }, "MergeProfilesRequest":{ "type":"structure", "required":[ @@ -2753,6 +2925,12 @@ "OTHER" ] }, + "PhoneNumberList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":4, + "min":1 + }, "Profile":{ "type":"structure", "members":{ @@ -2947,6 +3125,45 @@ "max":512, "pattern":"arn:aws:iam:.*:[0-9]+:.*" }, + "RuleBasedMatchingRequest":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{"shape":"optionalBoolean"}, + "MatchingRules":{"shape":"MatchingRules"}, + "MaxAllowedRuleLevelForMerging":{"shape":"MaxAllowedRuleLevelForMerging"}, + "MaxAllowedRuleLevelForMatching":{"shape":"MaxAllowedRuleLevelForMatching"}, + "AttributeTypesSelector":{"shape":"AttributeTypesSelector"}, + "ConflictResolution":{"shape":"ConflictResolution"}, + "ExportingConfig":{"shape":"ExportingConfig"} + } + }, + "RuleBasedMatchingResponse":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"optionalBoolean"}, + "MatchingRules":{"shape":"MatchingRules"}, + "Status":{"shape":"RuleBasedMatchingStatus"}, + "MaxAllowedRuleLevelForMerging":{"shape":"MaxAllowedRuleLevelForMerging"}, + "MaxAllowedRuleLevelForMatching":{"shape":"MaxAllowedRuleLevelForMatching"}, + "AttributeTypesSelector":{"shape":"AttributeTypesSelector"}, + "ConflictResolution":{"shape":"ConflictResolution"}, + "ExportingConfig":{"shape":"ExportingConfig"} + } + }, + "RuleBasedMatchingStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "ACTIVE" + ] + }, + "RuleLevel":{ + "type":"integer", + "max":15, + "min":1 + }, "S3ConnectorOperator":{ "type":"string", "enum":[ @@ -3430,6 +3647,7 @@ "DefaultEncryptionKey":{"shape":"encryptionKey"}, "DeadLetterQueueUrl":{"shape":"sqsQueueUrl"}, "Matching":{"shape":"MatchingRequest"}, + "RuleBasedMatching":{"shape":"RuleBasedMatchingRequest"}, "Tags":{"shape":"TagMap"} } }, @@ -3446,6 +3664,7 @@ "DefaultEncryptionKey":{"shape":"encryptionKey"}, "DeadLetterQueueUrl":{"shape":"sqsQueueUrl"}, "Matching":{"shape":"MatchingResponse"}, + "RuleBasedMatching":{"shape":"RuleBasedMatchingResponse"}, "CreatedAt":{"shape":"timestamp"}, "LastUpdatedAt":{"shape":"timestamp"}, "Tags":{"shape":"TagMap"} diff --git a/models/apis/customer-profiles/2020-08-15/docs-2.json b/models/apis/customer-profiles/2020-08-15/docs-2.json index b06b5ac9ac5..121daf6cb3c 100644 --- a/models/apis/customer-profiles/2020-08-15/docs-2.json +++ b/models/apis/customer-profiles/2020-08-15/docs-2.json @@ -4,7 +4,7 @@ "operations": { "AddProfileKey": "Associates a new key value with a specific profile, such as a Contact Record ContactId.
A profile object can have a single unique key and any number of additional keys that can be used to identify the profile that it belongs to.
", "CreateCalculatedAttributeDefinition": "Creates a new calculated attribute definition. After creation, new object data ingested into Customer Profiles will be included in the calculated attribute, which can be retrieved for a profile using the GetCalculatedAttributeForProfile API. Defining a calculated attribute makes it available for all profiles within a domain. Each calculated attribute can only reference one ObjectType
and at most, two fields from that ObjectType
.
Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.
Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
Use this API or UpdateDomain to enable identity resolution: set Matching
to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
", + "CreateDomain": "Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.
Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
Use this API or UpdateDomain to enable identity resolution: set Matching
to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
", "CreateEventStream": "Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles.
Each event stream can be associated with only one Kinesis Data Stream destination in the same region and Amazon Web Services account as the customer profiles domain
", "CreateIntegrationWorkflow": "Creates an integration workflow. An integration workflow is an async process which ingests historic data and sets up an integration for ongoing updates. The supported Amazon AppFlow sources are Salesforce, ServiceNow, and Marketo.
", "CreateProfile": "Creates a standard profile.
A standard profile represents the following attributes for a customer profile in a domain.
", @@ -27,6 +27,7 @@ "GetMatches": "Before calling this API, use CreateDomain or UpdateDomain to enable identity resolution: set Matching
to true.
GetMatches returns potentially matching profiles, based on the results of the latest run of a machine learning process.
The process of matching duplicate profiles. If Matching
= true
, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig
in the MatchingRequest
, you can download the results from S3.
Amazon Connect uses the following profile attributes to identify matches:
PhoneNumber
HomePhoneNumber
BusinessPhoneNumber
MobilePhoneNumber
EmailAddress
PersonalEmailAddress
BusinessEmailAddress
FullName
For example, two or more profiles—with spelling mistakes such as John Doe and Jhn Doe, or different casing email addresses such as JOHN_DOE@ANYCOMPANY.COM and johndoe@anycompany.com, or different phone number formats such as 555-010-0000 and +1-555-010-0000—can be detected as belonging to the same customer John Doe and merged into a unified profile.
", "GetProfileObjectType": "Returns the object types for a specific domain.
", "GetProfileObjectTypeTemplate": "Returns the template information for a specific object type.
A template is a predefined ProfileObjectType, such as “Salesforce-Account” or “Salesforce-Contact.” When a user sends a ProfileObject, using the PutProfileObject API, with an ObjectTypeName that matches one of the TemplateIds, it uses the mappings from the template.
", + "GetSimilarProfiles": "Returns a set of profiles that belong to the same matching group using the matchId
or profileId
. You can also specify the type of matching that you want for finding similar profiles using either RULE_BASED_MATCHING
or ML_BASED_MATCHING
.
Get details of specified workflow.
", "GetWorkflowSteps": "Get granular list of steps in workflow.
", "ListAccountIntegrations": "Lists all of the integrations associated to a specific URI in the AWS account.
", @@ -39,6 +40,7 @@ "ListProfileObjectTypeTemplates": "Lists all of the template information for object types.
", "ListProfileObjectTypes": "Lists all of the templates available within the service.
", "ListProfileObjects": "Returns a list of objects associated with a profile of a given ProfileObjectType.
", + "ListRuleBasedMatches": "Returns a set of MatchIds
that belong to the given domain.
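The two operations documented above, ListRuleBasedMatches and GetSimilarProfiles, pair naturally: list the MatchIds for a domain, then fetch the profiles in each matching group. A minimal aws-sdk-go sketch follows; the Go input/output type names are the SDK's generated ones, and the domain name and the _matchId search key are assumptions used only for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
	svc := customerprofiles.New(session.Must(session.NewSession()))

	// List the MatchIds that rule-based matching produced for a domain.
	matches, err := svc.ListRuleBasedMatches(&customerprofiles.ListRuleBasedMatchesInput{
		DomainName: aws.String("example-domain"), // hypothetical domain name
		MaxResults: aws.Int64(25),
	})
	if err != nil {
		panic(err)
	}

	// For each MatchId, pull the profiles that belong to that matching group.
	for _, id := range matches.MatchIds {
		similar, err := svc.GetSimilarProfiles(&customerprofiles.GetSimilarProfilesInput{
			DomainName:  aws.String("example-domain"),
			MatchType:   aws.String("RULE_BASED_MATCHING"),
			SearchKey:   aws.String("_matchId"), // assumed search key for looking up a match group
			SearchValue: id,
		})
		if err != nil {
			panic(err)
		}
		fmt.Println("match", aws.StringValue(id), "->", aws.StringValueSlice(similar.ProfileIds))
	}
}
```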
Displays the tags associated with an Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.
", "ListWorkflows": "Query to list all workflows.
", "MergeProfiles": "Runs an AWS Lambda job that does the following:
All the profileKeys in the ProfileToBeMerged
will be moved to the main profile.
All the objects in the ProfileToBeMerged
will be moved to the main profile.
All the ProfileToBeMerged
will be deleted at the end.
All the profileKeys in the ProfileIdsToBeMerged
will be moved to the main profile.
Standard fields are merged as follows:
Fields are always \"union\"-ed if there are no conflicts in standard fields or attributeKeys.
When there are conflicting fields:
If no SourceProfileIds
entry is specified, the main Profile value is always taken.
If a SourceProfileIds
entry is specified, the specified profileId is always taken, even if it is a NULL value.
You can use MergeProfiles together with GetMatches, which returns potentially matching profiles, or use it with the results of another matching system. After profiles have been merged, they cannot be separated (unmerged).
", @@ -49,7 +51,7 @@ "TagResource": "Assigns one or more tags (key-value pairs) to the specified Amazon Connect Customer Profiles resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.
Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.
You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a resource.
", "UntagResource": "Removes one or more tags from the specified Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.
", "UpdateCalculatedAttributeDefinition": "Updates an existing calculated attribute definition. When updating the Conditions, note that increasing the date range of a calculated attribute will not trigger inclusion of historical data greater than the current date range.
", - "UpdateDomain": "Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.
After a domain is created, the name can’t be changed.
Use this API or CreateDomain to enable identity resolution: set Matching
to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
To add or remove tags on an existing Domain, see TagResource/UntagResource.
", + "UpdateDomain": "Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.
After a domain is created, the name can’t be changed.
Use this API or CreateDomain to enable identity resolution: set Matching
to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
To add or remove tags on an existing Domain, see TagResource/UntagResource.
", "UpdateProfile": "Updates the properties of a profile. The ProfileId is required for updating a customer profile.
When calling the UpdateProfile API, specifying an empty string value means that any existing value will be removed. Not specifying a string value means that any value already there will be kept.
" }, "shapes": { @@ -103,6 +105,7 @@ "GetProfileObjectTypeTemplateResponse$TemplateId": "A unique identifier for the object template.
", "GetProfileObjectTypeTemplateResponse$SourceName": "The name of the source of the object template.
", "GetProfileObjectTypeTemplateResponse$SourceObject": "The source of the object template.
", + "GetSimilarProfilesRequest$DomainName": "The unique name of the domain.
", "GetWorkflowRequest$DomainName": "The unique name of the domain.
", "GetWorkflowStepsRequest$DomainName": "The unique name of the domain.
", "IdentityResolutionJob$DomainName": "The unique name of the domain.
", @@ -119,6 +122,7 @@ "ListProfileObjectTypeTemplateItem$SourceObject": "The source of the object template.
", "ListProfileObjectTypesRequest$DomainName": "The unique name of the domain.
", "ListProfileObjectsRequest$DomainName": "The unique name of the domain.
", + "ListRuleBasedMatchesRequest$DomainName": "The unique name of the domain.
", "ListWorkflowsRequest$DomainName": "The unique name of the domain.
", "MergeProfilesRequest$DomainName": "The unique name of the domain.
", "ObjectFilter$KeyName": "A searchable identifier of a profile object. The predefined keys you can use to search for _asset
include: _assetId
, _assetName
, and _serialNumber
. The predefined keys you can use to search for _case
include: _caseId
. The predefined keys you can use to search for _order
include: _orderId
.
The customer’s billing address.
" } }, + "AddressList": { + "base": null, + "refs": { + "AttributeTypesSelector$Address": "The Address
type. You can choose from Address
, BusinessAddress
, MaillingAddress
, and ShippingAddress
.
You only can use the Address type in the MatchingRule
. For example, if you want to match profile based on BusinessAddress.City
or MaillingAddress.City
, you need to choose the BusinessAddress
and the MaillingAddress
to represent the Address type and specify the Address.City
on the matching rule.
Details for workflow of type APPFLOW_INTEGRATION
.
A list of attribute items specified in the mathematical expression.
" } }, + "AttributeMatchingModel": { + "base": null, + "refs": { + "AttributeTypesSelector$AttributeMatchingModel": "Configures the AttributeMatchingModel
, you can either choose ONE_TO_ONE
or MANY_TO_MANY
.
A unique identifier for the attributes field to be merged.
" } }, + "AttributeTypesSelector": { + "base": "Configuration information about the AttributeTypesSelector
where the rule-based identity resolution uses to match profiles. You can choose how profiles are compared across attribute types and which attribute to use for matching from each type. There are three attribute types you can configure:
Email type
You can choose from Email
, BusinessEmail
, and PersonalEmail
Phone number type
You can choose from Phone
, HomePhone
, and MobilePhone
Address type
You can choose from Address
, BusinessAddress
, MaillingAddress
, and ShippingAddress
You can either choose ONE_TO_ONE
or MANY_TO_MANY
as the AttributeMatchingModel
. When choosing MANY_TO_MANY
, the system can match attribute across the sub-types of an attribute type. For example, if the value of the Email
field of Profile A and the value of BusinessEmail
field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE
the system can only match if the sub-types are exact matches. For example, only when the value of the Email
field of Profile A and the value of the Email
field of Profile B matches, the two profiles are matched on the Email type.
Configures information about the AttributeTypesSelector
that the rule-based identity resolution uses to match profiles.
Configures information about the AttributeTypesSelector
that the rule-based identity resolution uses to match profiles.
How the auto-merging process should resolve conflicts between different profiles.
", "refs": { "AutoMerging$ConflictResolution": "How the auto-merging process should resolve conflicts between different profiles. For example, if Profile A and Profile B have the same FirstName
and LastName
(and that is the matching criteria), which EmailAddress
should be used?
How the auto-merging process should resolve conflicts between different profiles.
" + "GetAutoMergingPreviewRequest$ConflictResolution": "How the auto-merging process should resolve conflicts between different profiles.
", + "RuleBasedMatchingRequest$ConflictResolution": null, + "RuleBasedMatchingResponse$ConflictResolution": null } }, "ConflictResolvingModel": { @@ -507,6 +532,7 @@ "Double": { "base": null, "refs": { + "GetSimilarProfilesResponse$ConfidenceScore": "It only has value when the MatchType
is ML_BASED_MATCHING
.A number between 0 and 1, where a higher score means higher similarity. Examining match confidence scores lets you distinguish between groups of similar records in which the system is highly confident (which you may decide to merge), groups of similar records about which the system is uncertain (which you may decide to have reviewed by a human), and groups of similar records that the system deems to be unlikely (which you may decide to reject). Given confidence scores vary as per the data input, it should not be used as an absolute measure of matching quality.
A number between 0 and 1, where a higher score means higher similarity. Examining match confidence scores lets you distinguish between groups of similar records in which the system is highly confident (which you may decide to merge), groups of similar records about which the system is uncertain (which you may decide to have reviewed by a human), and groups of similar records that the system deems to be unlikely (which you may decide to reject). Given confidence scores vary as per the data input, it should not be used as an absolute measure of matching quality.
" } }, @@ -517,6 +543,12 @@ "GetAutoMergingPreviewRequest$MinAllowedConfidenceScoreForMerging": "Minimum confidence score required for profiles within a matching group to be merged during the auto-merge process.
" } }, + "EmailList": { + "base": null, + "refs": { + "AttributeTypesSelector$EmailAddress": "The Email
type. You can choose from EmailAddress
, BusinessEmailAddress
and PersonalEmailAddress
.
You only can use the EmailAddress
type in the MatchingRule
. For example, if you want to match profile based on PersonalEmailAddress
or BusinessEmailAddress
, you need to choose the PersonalEmailAddress
and the BusinessEmailAddress
to represent the EmailAddress
type and only specify the EmailAddress
on the matching rule.
Details of the destination being used for the EventStream.
", "refs": { @@ -553,7 +585,9 @@ "base": "Configuration information about the S3 bucket where Identity Resolution Jobs writes result files.
You need to give Customer Profiles service principal write permission to your S3 bucket. Otherwise, you'll get an exception in the API response. For an example policy, see Amazon Connect Customer Profiles cross-service confused deputy prevention.
Configuration information for exporting Identity Resolution results, for example, to an S3 bucket.
", - "MatchingResponse$ExportingConfig": "Configuration information for exporting Identity Resolution results, for example, to an S3 bucket.
" + "MatchingResponse$ExportingConfig": "Configuration information for exporting Identity Resolution results, for example, to an S3 bucket.
", + "RuleBasedMatchingRequest$ExportingConfig": null, + "RuleBasedMatchingResponse$ExportingConfig": null } }, "ExportingLocation": { @@ -724,6 +758,16 @@ "refs": { } }, + "GetSimilarProfilesRequest": { + "base": null, + "refs": { + } + }, + "GetSimilarProfilesResponse": { + "base": null, + "refs": { + } + }, "GetWorkflowRequest": { "base": null, "refs": { @@ -970,6 +1014,16 @@ "refs": { } }, + "ListRuleBasedMatchesRequest": { + "base": null, + "refs": { + } + }, + "ListRuleBasedMatchesResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -1008,12 +1062,25 @@ "SourceConnectorProperties$Marketo": "The properties that are applied when Marketo is being used as a source.
" } }, + "MatchIdList": { + "base": null, + "refs": { + "ListRuleBasedMatchesResponse$MatchIds": "The list of MatchIds
for the given domain.
The Match group object.
", "refs": { "MatchesList$member": null } }, + "MatchType": { + "base": null, + "refs": { + "GetSimilarProfilesRequest$MatchType": "Specify the type of matching to get similar profiles for.
", + "GetSimilarProfilesResponse$MatchType": "Specify the type of matching to get similar profiles for.
" + } + }, "MatchesList": { "base": null, "refs": { @@ -1047,6 +1114,39 @@ "UpdateDomainResponse$Matching": "The process of matching duplicate profiles. If Matching
= true
, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig
in the MatchingRequest
, you can download the results from S3.
Specifies how does the rule-based matching process should match profiles. You can choose from the following attributes to build the matching Rule:
AccountNumber
Address.Address
Address.City
Address.Country
Address.County
Address.PostalCode
Address.State
Address.Province
BirthDate
BusinessName
EmailAddress
FirstName
Gender
LastName
MiddleName
PhoneNumber
Any customized profile attributes that start with the Attributes
A single rule level of the MatchRules
. Configures how the rule-based matching process should match profiles.
Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule
in the MatchingRules
.
Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule
in the MatchingRules
.
Indicates the maximum allowed rule level.
", + "RuleBasedMatchingResponse$MaxAllowedRuleLevelForMatching": "Indicates the maximum allowed rule level.
" + } + }, + "MaxAllowedRuleLevelForMerging": { + "base": null, + "refs": { + "RuleBasedMatchingRequest$MaxAllowedRuleLevelForMerging": "", + "RuleBasedMatchingResponse$MaxAllowedRuleLevelForMerging": "" + } + }, "MergeProfilesRequest": { "base": null, "refs": { @@ -1125,6 +1225,12 @@ "UpdateProfileRequest$PartyType": "The type of profile used to describe the customer.
" } }, + "PhoneNumberList": { + "base": null, + "refs": { + "AttributeTypesSelector$PhoneNumber": "The PhoneNumber
type. You can choose from PhoneNumber
, HomePhoneNumber
, and MobilePhoneNumber
.
You only can use the PhoneNumber
type in the MatchingRule
. For example, if you want to match a profile based on Phone
or HomePhone
, you need to choose the Phone
and the HomePhone
to represent the PhoneNumber
type and only specify the PhoneNumber
on the matching rule.
The standard profile of a customer.
", "refs": { @@ -1134,6 +1240,7 @@ "ProfileIdList": { "base": null, "refs": { + "GetSimilarProfilesResponse$ProfileIds": "Set of profileId
s that belong to the same matching group.
A list of identifiers for profiles that match.
" } }, @@ -1220,6 +1327,33 @@ "CreateIntegrationWorkflowRequest$RoleArn": "The Amazon Resource Name (ARN) of the IAM role. Customer Profiles assumes this role to create resources on your behalf as part of workflow execution.
" } }, + "RuleBasedMatchingRequest": { + "base": "The request to enable the rule-based matching.
", + "refs": { + "CreateDomainRequest$RuleBasedMatching": "The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching
= true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest
. You can use the ListRuleBasedMatches
and GetSimilarProfiles
API to return and review the results. Also, if you have configured ExportingConfig
in the RuleBasedMatchingRequest
, you can download the results from S3.
The process of matching duplicate profiles using the rule-Based matching. If RuleBasedMatching
= true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest
. You can use the ListRuleBasedMatches
and GetSimilarProfiles
API to return and review the results. Also, if you have configured ExportingConfig
in the RuleBasedMatchingRequest
, you can download the results from S3.
The response of the Rule-based matching request.
", + "refs": { + "CreateDomainResponse$RuleBasedMatching": "The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching
= true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest
. You can use the ListRuleBasedMatches
and GetSimilarProfiles
API to return and review the results. Also, if you have configured ExportingConfig
in the RuleBasedMatchingRequest
, you can download the results from S3.
The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching
= true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest
. You can use the ListRuleBasedMatches
and GetSimilarProfiles
API to return and review the results. Also, if you have configured ExportingConfig
in the RuleBasedMatchingRequest
, you can download the results from S3.
The process of matching duplicate profiles using the rule-Based matching. If RuleBasedMatching
= true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest
. You can use the ListRuleBasedMatches
and GetSimilarProfiles
API to return and review the results. Also, if you have configured ExportingConfig
in the RuleBasedMatchingRequest
, you can download the results from S3.
PENDING
The first status after configuration a rule-based matching rule. If it is an existing domain, the rule-based Identity Resolution waits one hour before creating the matching rule. If it is a new domain, the system will skip the PENDING
stage.
IN_PROGRESS
The system is creating the rule-based matching rule. Under this status, the system is evaluating the existing data and you can no longer change the Rule-based matching configuration.
ACTIVE
The rule is ready to use. You can change the rule a day after the status is in ACTIVE
.
The integer rule level that the profiles matched on.
" + } + }, "S3ConnectorOperator": { "base": null, "refs": { @@ -1700,6 +1834,7 @@ "base": null, "refs": { "GetMatchesRequest$MaxResults": "The maximum number of results to return per page.
", + "GetSimilarProfilesRequest$MaxResults": "The maximum number of objects returned per page.
", "GetWorkflowStepsRequest$MaxResults": "The maximum number of results to return per page.
", "ListAccountIntegrationsRequest$MaxResults": "The maximum number of objects returned per page.
", "ListCalculatedAttributeDefinitionsRequest$MaxResults": "The maximum number of calculated attribute definitions returned per page.
", @@ -1711,6 +1846,7 @@ "ListProfileObjectTypeTemplatesRequest$MaxResults": "The maximum number of objects returned per page.
", "ListProfileObjectTypesRequest$MaxResults": "The maximum number of objects returned per page.
", "ListProfileObjectsRequest$MaxResults": "The maximum number of objects returned per page.
", + "ListRuleBasedMatchesRequest$MaxResults": "The maximum number of MatchIds
returned per page.
The maximum number of results to return per page.
", "SearchProfilesRequest$MaxResults": "The maximum number of objects returned per page.
The default is 20 if this parameter is not included in the request.
" } @@ -1742,7 +1878,9 @@ "ListIntegrationsRequest$IncludeHidden": "Boolean to indicate if hidden integration should be returned. Defaults to False
.
The flag that enables the matching process of duplicate profiles.
", "MatchingResponse$Enabled": "The flag that enables the matching process of duplicate profiles.
", - "PutIntegrationResponse$IsUnstructured": "Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition.
" + "PutIntegrationResponse$IsUnstructured": "Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition.
", + "RuleBasedMatchingRequest$Enabled": "The flag that enables the rule-based matching process of duplicate profiles.
", + "RuleBasedMatchingResponse$Enabled": "The flag that enables the rule-based matching process of duplicate profiles.
" } }, "requestValueList": { @@ -1844,6 +1982,7 @@ "Address$Province": "The province in which a customer lives.
", "Address$Country": "The country in which a customer lives.
", "Address$PostalCode": "The postal code of a customer address.
", + "AddressList$member": null, "AppflowIntegrationWorkflowAttributes$RoleArn": "The Amazon Resource Name (ARN) of the IAM role. Customer Profiles assumes this role to create resources on your behalf as part of workflow execution.
", "AppflowIntegrationWorkflowStep$ExecutionMessage": "Message indicating execution of workflow step for APPFLOW_INTEGRATION
workflow.
Start datetime of records pulled in batch during execution of workflow step for APPFLOW_INTEGRATION
workflow.
The unique identifier of the profile object generated by the service.
", "DeleteWorkflowRequest$WorkflowId": "Unique identifier for the workflow.
", "DestinationSummary$Uri": "The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.
", + "EmailList$member": null, "EventStreamDestinationDetails$Uri": "The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.
", "EventStreamSummary$EventStreamArn": "A unique identifier for the event stream.
", "GetCalculatedAttributeForProfileResponse$IsDataPartial": "Indicates whether the calculated attribute’s value is based on partial data. If data is partial, it is set to true.
", @@ -1885,6 +2025,9 @@ "GetIntegrationResponse$WorkflowId": "Unique identifier for the workflow.
", "GetProfileObjectTypeResponse$SourceLastUpdatedTimestampFormat": "The format of your sourceLastUpdatedTimestamp
that was previously set up.
The format of your sourceLastUpdatedTimestamp
that was previously set up.
The string indicating the search key to be used.
", + "GetSimilarProfilesRequest$SearchValue": "The string based on SearchKey
to be searched for similar profiles.
The string matchId
that the similar profiles belong to.
Workflow error messages during execution (if any).
", "ListAccountIntegrationsRequest$Uri": "The URI of the S3 bucket or any other type of data source.
", "ListCalculatedAttributeForProfileItem$IsDataPartial": "Indicates whether the calculated attribute’s value is based on partial data. If data is partial, it is set to true.
", @@ -1894,9 +2037,12 @@ "ListProfileObjectsItem$ProfileObjectUniqueKey": "The unique identifier of the ProfileObject generated by the service.
", "ListWorkflowsItem$WorkflowId": "Unique identifier for the workflow.
", "ListWorkflowsItem$StatusDescription": "Description for workflow execution status.
", + "MatchIdList$member": null, "MatchItem$MatchId": "The unique identifiers for this group of profiles that match.
", "MatchingAttributes$member": null, + "MatchingRuleAttributeList$member": null, "ObjectTypeNames$key": null, + "PhoneNumberList$member": null, "Profile$AccountNumber": "A unique account number that you have given to the customer.
", "Profile$BusinessName": "The name of the customer’s business.
", "Profile$FirstName": "The customer’s first name.
", @@ -2015,6 +2161,8 @@ "refs": { "GetMatchesRequest$NextToken": "The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.
", "GetMatchesResponse$NextToken": "If there are additional results, this is the token for the next set of results.
", + "GetSimilarProfilesRequest$NextToken": "The pagination token from the previous GetSimilarProfiles
API call.
The pagination token from the previous GetSimilarProfiles
API call.
The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.
", "GetWorkflowStepsResponse$NextToken": "If there are additional results, this is the token for the next set of results.
", "ListAccountIntegrationsRequest$NextToken": "The pagination token from the previous ListAccountIntegrations API call.
", @@ -2037,6 +2185,8 @@ "ListProfileObjectTypesResponse$NextToken": "Identifies the next page of results to return.
", "ListProfileObjectsRequest$NextToken": "The pagination token from the previous call to ListProfileObjects.
", "ListProfileObjectsResponse$NextToken": "The pagination token from the previous call to ListProfileObjects.
", + "ListRuleBasedMatchesRequest$NextToken": "The pagination token from the previous ListRuleBasedMatches
API call.
The pagination token from the previous ListRuleBasedMatches
API call.
The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.
", "ListWorkflowsResponse$NextToken": "If there are additional results, this is the token for the next set of results.
", "SearchProfilesRequest$NextToken": "The pagination token from the previous SearchProfiles API call.
", diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index 88c28326ec0..a67ce2920cd 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -54,6 +54,19 @@ {"shape":"InternalException"} ] }, + "CreateLocationAzureBlob":{ + "name":"CreateLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationAzureBlobRequest"}, + "output":{"shape":"CreateLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "CreateLocationEfs":{ "name":"CreateLocationEfs", "http":{ @@ -263,6 +276,19 @@ ], "endpoint":{"hostPrefix":"discovery-"} }, + "DescribeLocationAzureBlob":{ + "name":"DescribeLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationAzureBlobRequest"}, + "output":{"shape":"DescribeLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "DescribeLocationEfs":{ "name":"DescribeLocationEfs", "http":{ @@ -676,6 +702,19 @@ ], "endpoint":{"hostPrefix":"discovery-"} }, + "UpdateLocationAzureBlob":{ + "name":"UpdateLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationAzureBlobRequest"}, + "output":{"shape":"UpdateLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "UpdateLocationHdfs":{ "name":"UpdateLocationHdfs", "http":{ @@ -842,6 +881,46 @@ "BEST_EFFORT" ] }, + "AzureAccessTier":{ + "type":"string", + "enum":[ + "HOT", + "COOL", + "ARCHIVE" + ] + }, + "AzureBlobAuthenticationType":{ + "type":"string", + "enum":["SAS"] + }, + "AzureBlobContainerUrl":{ + "type":"string", + "max":325, + "pattern":"^https:\\/\\/[A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252}\\/[a-z0-9](-?[a-z0-9]){2,62}$" + }, + "AzureBlobSasConfiguration":{ + "type":"structure", + "required":["Token"], + "members":{ + "Token":{"shape":"AzureBlobSasToken"} + } + }, + "AzureBlobSasToken":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^.+$", + "sensitive":true + }, + "AzureBlobSubdirectory":{ + "type":"string", + "max":1024, + "pattern":"^[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}\\p{C}]*$" + }, + "AzureBlobType":{ + "type":"string", + "enum":["BLOCK"] + }, "BytesPerSecond":{ "type":"long", "min":-1 @@ -889,6 +968,30 @@ "AgentArn":{"shape":"AgentArn"} } }, + "CreateLocationAzureBlobRequest":{ + "type":"structure", + "required":[ + "ContainerUrl", + "AuthenticationType", + "AgentArns" + ], + "members":{ + "ContainerUrl":{"shape":"AzureBlobContainerUrl"}, + "AuthenticationType":{"shape":"AzureBlobAuthenticationType"}, + "SasConfiguration":{"shape":"AzureBlobSasConfiguration"}, + "BlobType":{"shape":"AzureBlobType"}, + "AccessTier":{"shape":"AzureAccessTier"}, + "Subdirectory":{"shape":"AzureBlobSubdirectory"}, + "AgentArns":{"shape":"AgentArnList"}, + "Tags":{"shape":"InputTagList"} + } + }, + "CreateLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, "CreateLocationEfsRequest":{ "type":"structure", "required":[ @@ -1227,6 +1330,25 @@ "JobEndTime":{"shape":"DiscoveryTime"} } }, + "DescribeLocationAzureBlobRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, + "DescribeLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + 
"LocationArn":{"shape":"LocationArn"}, + "LocationUri":{"shape":"LocationUri"}, + "AuthenticationType":{"shape":"AzureBlobAuthenticationType"}, + "BlobType":{"shape":"AzureBlobType"}, + "AccessTier":{"shape":"AzureAccessTier"}, + "AgentArns":{"shape":"AgentArnList"}, + "CreationTime":{"shape":"Time"} + } + }, "DescribeLocationEfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -2918,6 +3040,24 @@ "members":{ } }, + "UpdateLocationAzureBlobRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{"shape":"LocationArn"}, + "Subdirectory":{"shape":"AzureBlobSubdirectory"}, + "AuthenticationType":{"shape":"AzureBlobAuthenticationType"}, + "SasConfiguration":{"shape":"AzureBlobSasConfiguration"}, + "BlobType":{"shape":"AzureBlobType"}, + "AccessTier":{"shape":"AzureAccessTier"}, + "AgentArns":{"shape":"AgentArnList"} + } + }, + "UpdateLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLocationHdfsRequest":{ "type":"structure", "required":["LocationArn"], diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index 508064a27e7..71682ab43e1 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -5,22 +5,24 @@ "AddStorageSystem": "Creates an Amazon Web Services resource for an on-premises storage system that you want DataSync Discovery to collect information about.
", "CancelTaskExecution": "Stops an DataSync task execution that's in progress. The transfer of some files are abruptly interrupted. File contents that're transferred to the destination might be incomplete or inconsistent with the source files.
However, if you start a new task execution using the same task and allow it to finish, file content on the destination will be complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, DataSync successfully completes the transfer when you start the next task execution.
", "CreateAgent": "Activates an DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account.
If you haven't deployed an agent yet, see the following topics to learn more:
If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent.
Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a transfer source or destination.
Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.
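A minimal aws-sdk-go sketch of the CreateLocationAzureBlob operation described above, assuming the generated Go type names; the container URL, SAS token, and agent ARN are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// Register an Azure Blob Storage container as a DataSync location.
	// Per the model above, SAS is the only authentication type and BLOCK
	// the only blob type currently supported.
	out, err := svc.CreateLocationAzureBlob(&datasync.CreateLocationAzureBlobInput{
		ContainerUrl:       aws.String("https://myaccount.blob.core.windows.net/my-container"), // placeholder
		AuthenticationType: aws.String("SAS"),
		SasConfiguration: &datasync.AzureBlobSasConfiguration{
			Token: aws.String("sp=r&st=...&sig=..."), // replace with a real SAS token
		},
		BlobType:     aws.String("BLOCK"),
		AccessTier:   aws.String("HOT"),
		Subdirectory: aws.String("/my/images"),
		AgentArns: aws.StringSlice([]string{
			"arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0", // placeholder agent ARN
		}),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created location:", aws.StringValue(out.LocationArn))
}
```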
", "CreateLocationEfs": "Creates an endpoint for an Amazon EFS file system that DataSync can access for a transfer. For more information, see Creating a location for Amazon EFS.
", "CreateLocationFsxLustre": "Creates an endpoint for an Amazon FSx for Lustre file system.
", "CreateLocationFsxOntap": "Creates an endpoint for an Amazon FSx for NetApp ONTAP file system that DataSync can access for a transfer. For more information, see Creating a location for FSx for ONTAP.
", "CreateLocationFsxOpenZfs": "Creates an endpoint for an Amazon FSx for OpenZFS file system that DataSync can access for a transfer. For more information, see Creating a location for FSx for OpenZFS.
Request parameters related to SMB
aren't supported with the CreateLocationFsxOpenZfs
operation.
Creates an endpoint for an Amazon FSx for Windows File Server file system.
", "CreateLocationHdfs": "Creates an endpoint for a Hadoop Distributed File System (HDFS).
", - "CreateLocationNfs": "Defines a file system on a Network File System (NFS) server that can be read from or written to.
", + "CreateLocationNfs": "Creates an endpoint for an Network File System (NFS) file server that DataSync can use for a data transfer.
", "CreateLocationObjectStorage": "Creates an endpoint for an object storage system that DataSync can access for a transfer. For more information, see Creating a location for object storage.
", "CreateLocationS3": "A location is an endpoint for an Amazon S3 bucket. DataSync can use the location as a source or destination for copying data.
Before you create your location, make sure that you read the following sections:
For more information, see Creating an Amazon S3 location.
", - "CreateLocationSmb": "Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access for a transfer. For more information, see Creating an SMB location.
", + "CreateLocationSmb": "Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for a data transfer.
Before you begin, make sure that you understand how DataSync accesses an SMB file server.
", "CreateTask": "Configures a task, which defines where and how DataSync transfers your data.
A task includes a source location, a destination location, and the preferences for how and when you want to transfer your data (such as bandwidth limits, scheduling, among other options).
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.
Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. The operation disassociates the agent from your Amazon Web Services account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.
", "DeleteLocation": "Deletes the configuration of a location used by DataSync.
", "DeleteTask": "Deletes an DataSync task.
", "DescribeAgent": "Returns metadata about an DataSync agent, such as its name, endpoint type, and status.
", "DescribeDiscoveryJob": "Returns information about a DataSync discovery job.
", + "DescribeLocationAzureBlob": "Provides details about how an DataSync transfer location for Microsoft Azure Blob Storage is configured.
", "DescribeLocationEfs": "Returns metadata about your DataSync location for an Amazon EFS file system.
", "DescribeLocationFsxLustre": "Provides details about how an DataSync location for an Amazon FSx for Lustre file system is configured.
", "DescribeLocationFsxOntap": "Provides details about how an DataSync location for an Amazon FSx for NetApp ONTAP file system is configured.
If your location uses SMB, the DescribeLocationFsxOntap
operation doesn't actually return a Password
.
Returns information about an on-premises storage system that you're using with DataSync Discovery.
", "DescribeStorageSystemResourceMetrics": "Returns information, including performance data and capacity usage, which DataSync Discovery collects about a specific resource in your-premises storage system.
", "DescribeStorageSystemResources": "Returns information that DataSync Discovery collects about resources in your on-premises storage system.
", - "DescribeTask": "Returns metadata about a task.
", - "DescribeTaskExecution": "Returns detailed metadata about a task that is being executed.
", + "DescribeTask": "Provides information about an DataSync transfer task.
", + "DescribeTaskExecution": "Provides information about an DataSync transfer task that's running.
", "GenerateRecommendations": "Creates recommendations about where to migrate your data to in Amazon Web Services. Recommendations are generated based on information that DataSync Discovery collects about your on-premises storage system's resources. For more information, see Recommendations provided by DataSync Discovery.
Once generated, you can view your recommendations by using the DescribeStorageSystemResources operation.
If your discovery job completes successfully, you don't need to use this operation. DataSync Discovery generates the recommendations for you automatically.
Returns a list of DataSync agents that belong to an Amazon Web Services account in the Amazon Web Services Region specified in the request.
With pagination, you can reduce the number of agents returned in a response. If you get a truncated list of agents in a response, the response contains a marker that you can specify in your next request to fetch the next page of agents.
ListAgents
is eventually consistent. This means the result of running the operation might not reflect that you just created or deleted an agent. For example, if you create an agent with CreateAgent and then immediately run ListAgents
, that agent might not show up in the list right away. In situations like this, you can always confirm whether an agent has been created (or deleted) by using DescribeAgent.
Provides a list of the existing discovery jobs in the Amazon Web Services Region and Amazon Web Services account where you're using DataSync Discovery.
", @@ -52,6 +54,7 @@ "UntagResource": "Removes tags from an Amazon Web Services resource.
", "UpdateAgent": "Updates the name of an agent.
", "UpdateDiscoveryJob": "Edits a DataSync discovery job configuration.
", + "UpdateLocationAzureBlob": "Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.
", "UpdateLocationHdfs": "Updates some parameters of a previously created location for a Hadoop Distributed File System cluster.
", "UpdateLocationNfs": "Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see Creating a location for NFS.
", "UpdateLocationObjectStorage": "Updates some parameters of an existing object storage location that DataSync accesses for a transfer. For information about creating a self-managed object storage location, see Creating a location for object storage.
", @@ -93,15 +96,18 @@ "AgentArnList": { "base": null, "refs": { + "CreateLocationAzureBlobRequest$AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.
You can specify more than one agent. For more information, see Using multiple agents for your transfer.
", "CreateLocationHdfsRequest$AgentArns": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.
", "CreateLocationObjectStorageRequest$AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.
", "CreateLocationS3Request$AgentArns": "If you're using DataSync on an Amazon Web Services Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy your DataSync agent on Outposts.
", "CreateLocationSmbRequest$AgentArns": "Specifies the DataSync agent (or agents) which you want to connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN).
", + "DescribeLocationAzureBlobResponse$AgentArns": "The ARNs of the DataSync agents that can connect with your Azure Blob Storage container.
", "DescribeLocationHdfsResponse$AgentArns": "The ARNs of the agents that are used to connect to the HDFS cluster.
", "DescribeLocationObjectStorageResponse$AgentArns": "The ARNs of the DataSync agents that can securely connect with your location.
", "DescribeLocationS3Response$AgentArns": "If you are using DataSync on an Amazon Web Services Outpost, the Amazon Resource Name (ARNs) of the EC2 agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy your DataSync agent on Outposts.
", "DescribeLocationSmbResponse$AgentArns": "The Amazon Resource Name (ARN) of the source SMB file system location that is created.
", "OnPremConfig$AgentArns": "ARNs of the agents to use for an NFS location.
", + "UpdateLocationAzureBlobRequest$AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.
You can specify more than one agent. For more information, see Using multiple agents for your transfer.
", "UpdateLocationHdfsRequest$AgentArns": "The ARNs of the agents that are used to connect to the HDFS cluster.
", "UpdateLocationObjectStorageRequest$AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.
", "UpdateLocationSmbRequest$AgentArns": "The Amazon Resource Names (ARNs) of agents to use for a Simple Message Block (SMB) location.
" @@ -132,6 +138,56 @@ "Options$Atime": "Specifies whether to preserve metadata indicating the last time a file was read or written to. If you set Atime
to BEST_EFFORT
, DataSync attempts to preserve the original Atime
attribute on all source files (that is, the version before the PREPARING
phase of the task execution).
The behavior of Atime
isn't fully standard across platforms, so DataSync can only do this on a best-effort basis.
Default value: BEST_EFFORT
BEST_EFFORT
: Attempt to preserve the per-file Atime
value (recommended).
NONE
: Ignore Atime
.
If Atime
is set to BEST_EFFORT
, Mtime
must be set to PRESERVE
.
If Atime
is set to NONE
, Mtime
must also be NONE
.
Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
", + "DescribeLocationAzureBlobResponse$AccessTier": "The access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
", + "UpdateLocationAzureBlobRequest$AccessTier": "Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
" + } + }, + "AzureBlobAuthenticationType": { + "base": null, + "refs": { + "CreateLocationAzureBlobRequest$AuthenticationType": "Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
", + "DescribeLocationAzureBlobResponse$AuthenticationType": "The authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
", + "UpdateLocationAzureBlobRequest$AuthenticationType": "Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
" + } + }, + "AzureBlobContainerUrl": { + "base": null, + "refs": { + "CreateLocationAzureBlobRequest$ContainerUrl": "Specifies the URL of the Azure Blob Storage container involved in your transfer.
" + } + }, + "AzureBlobSasConfiguration": { + "base": "The shared access signature (SAS) configuration that allows DataSync to access your Microsoft Azure Blob Storage.
For more information, see SAS tokens for accessing your Azure Blob Storage.
", + "refs": { + "CreateLocationAzureBlobRequest$SasConfiguration": "Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.
", + "UpdateLocationAzureBlobRequest$SasConfiguration": "Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.
" + } + }, + "AzureBlobSasToken": { + "base": null, + "refs": { + "AzureBlobSasConfiguration$Token": "Specifies a SAS token that provides permissions at the Azure storage account, container, or folder level.
The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:
sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D
Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images
).
Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images
).
Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
", + "DescribeLocationAzureBlobResponse$BlobType": "The type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
", + "UpdateLocationAzureBlobRequest$BlobType": "Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
" + } + }, "BytesPerSecond": { "base": null, "refs": { @@ -172,6 +228,16 @@ "refs": { } }, + "CreateLocationAzureBlobRequest": { + "base": null, + "refs": { + } + }, + "CreateLocationAzureBlobResponse": { + "base": null, + "refs": { + } + }, "CreateLocationEfsRequest": { "base": "CreateLocationEfsRequest
", "refs": { @@ -339,6 +405,16 @@ "refs": { } }, + "DescribeLocationAzureBlobRequest": { + "base": null, + "refs": { + } + }, + "DescribeLocationAzureBlobResponse": { + "base": null, + "refs": { + } + }, "DescribeLocationEfsRequest": { "base": "DescribeLocationEfsRequest
", "refs": { @@ -498,9 +574,9 @@ "DiscoveryAgentArnList": { "base": null, "refs": { - "AddStorageSystemRequest$AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface.
", + "AddStorageSystemRequest$AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.
", "DescribeStorageSystemResponse$AgentArns": "The ARN of the DataSync agent that connects to and reads from your on-premises storage system.
", - "UpdateStorageSystemRequest$AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system.
" + "UpdateStorageSystemRequest$AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system. You can only specify one ARN.
" } }, "DiscoveryJobArn": { @@ -688,7 +764,7 @@ "Endpoint": { "base": null, "refs": { - "PrivateLinkConfig$PrivateLinkEndpoint": "The private endpoint that is configured for an agent that has access to IP addresses in a PrivateLink. An agent that is configured with this endpoint will not be accessible over the public internet.
" + "PrivateLinkConfig$PrivateLinkEndpoint": "Specifies the VPC endpoint provided by Amazon Web Services PrivateLink that your agent connects to.
" } }, "EndpointType": { @@ -921,13 +997,14 @@ "refs": { "AddStorageSystemRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your on-premises storage system.
", "CreateAgentRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least one tag for your agent.
", + "CreateLocationAzureBlobRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.
", "CreateLocationEfsRequest$Tags": "Specifies the key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
", "CreateLocationFsxLustreRequest$Tags": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
", "CreateLocationFsxOntapRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
", "CreateLocationFsxOpenZfsRequest$Tags": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
", "CreateLocationFsxWindowsRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
", "CreateLocationHdfsRequest$Tags": "The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
", - "CreateLocationNfsRequest$Tags": "The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
", + "CreateLocationNfsRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
", "CreateLocationObjectStorageRequest$Tags": "Specifies the key-value pair that represents a tag that you want to add to the resource. Tags can help you manage, filter, and search for your resources. We recommend creating a name tag for your location.
", "CreateLocationS3Request$Tags": "The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
", "CreateLocationSmbRequest$Tags": "Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
", @@ -1056,19 +1133,22 @@ "LocationArn": { "base": null, "refs": { + "CreateLocationAzureBlobResponse$LocationArn": "The ARN of the Azure Blob Storage transfer location that you created.
", "CreateLocationEfsResponse$LocationArn": "The Amazon Resource Name (ARN) of the Amazon EFS file system location that you create.
", "CreateLocationFsxLustreResponse$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's created.
", "CreateLocationFsxOntapResponse$LocationArn": "Specifies the ARN of the FSx for ONTAP file system location that you create.
", "CreateLocationFsxOpenZfsResponse$LocationArn": "The ARN of the FSx for OpenZFS file system location that you created.
", "CreateLocationFsxWindowsResponse$LocationArn": "The ARN of the FSx for Windows File Server file system location you created.
", "CreateLocationHdfsResponse$LocationArn": "The ARN of the source HDFS cluster location that's created.
", - "CreateLocationNfsResponse$LocationArn": "The Amazon Resource Name (ARN) of the source NFS file system location that is created.
", + "CreateLocationNfsResponse$LocationArn": "The ARN of the transfer location that you created for your NFS file server.
", "CreateLocationObjectStorageResponse$LocationArn": "Specifies the ARN of the object storage system location that you create.
", "CreateLocationS3Response$LocationArn": "The Amazon Resource Name (ARN) of the source Amazon S3 bucket location that is created.
", "CreateLocationSmbResponse$LocationArn": "The ARN of the SMB location that you created.
", "CreateTaskRequest$SourceLocationArn": "The Amazon Resource Name (ARN) of the source location for the task.
", "CreateTaskRequest$DestinationLocationArn": "The Amazon Resource Name (ARN) of an Amazon Web Services storage resource's location.
", "DeleteLocationRequest$LocationArn": "The Amazon Resource Name (ARN) of the location to delete.
", + "DescribeLocationAzureBlobRequest$LocationArn": "Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage transfer location.
", + "DescribeLocationAzureBlobResponse$LocationArn": "The ARN of your Azure Blob Storage transfer location.
", "DescribeLocationEfsRequest$LocationArn": "The Amazon Resource Name (ARN) of the Amazon EFS file system location that you want information about.
", "DescribeLocationEfsResponse$LocationArn": "The ARN of the Amazon EFS file system location.
", "DescribeLocationFsxLustreRequest$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Lustre location to describe.
", @@ -1092,8 +1172,9 @@ "DescribeTaskResponse$SourceLocationArn": "The Amazon Resource Name (ARN) of the source file system's location.
", "DescribeTaskResponse$DestinationLocationArn": "The Amazon Resource Name (ARN) of the Amazon Web Services storage resource's location.
", "LocationListEntry$LocationArn": "The Amazon Resource Name (ARN) of the location. For Network File System (NFS) or Amazon EFS, the location is the export path. For Amazon S3, the location is the prefix path that you want to mount and use as the root of the location.
", + "UpdateLocationAzureBlobRequest$LocationArn": "Specifies the ARN of the Azure Blob Storage transfer location that you're updating.
", "UpdateLocationHdfsRequest$LocationArn": "The Amazon Resource Name (ARN) of the source HDFS cluster location.
", - "UpdateLocationNfsRequest$LocationArn": "The Amazon Resource Name (ARN) of the NFS location to update.
", + "UpdateLocationNfsRequest$LocationArn": "Specifies the Amazon Resource Name (ARN) of the NFS location that you want to update.
", "UpdateLocationObjectStorageRequest$LocationArn": "Specifies the ARN of the object storage system location that you're updating.
", "UpdateLocationSmbRequest$LocationArn": "The Amazon Resource Name (ARN) of the SMB location to update.
" } @@ -1131,6 +1212,7 @@ "LocationUri": { "base": null, "refs": { + "DescribeLocationAzureBlobResponse$LocationUri": "The URL of the Azure Blob Storage container involved in your transfer.
", "DescribeLocationEfsResponse$LocationUri": "The URL of the Amazon EFS file system location.
", "DescribeLocationFsxLustreResponse$LocationUri": "The URI of the FSx for Lustre location that was described.
", "DescribeLocationFsxOntapResponse$LocationUri": "The uniform resource identifier (URI) of the FSx for ONTAP file system location.
", @@ -1261,8 +1343,8 @@ "NfsMountOptions": { "base": "Specifies how DataSync can access a location using the NFS protocol.
", "refs": { - "CreateLocationNfsRequest$MountOptions": "The NFS mount options that DataSync can use to mount your NFS share.
", - "DescribeLocationNfsResponse$MountOptions": "The NFS mount options that DataSync used to mount your NFS share.
", + "CreateLocationNfsRequest$MountOptions": "Specifies the mount options that DataSync can use to mount your NFS share.
", + "DescribeLocationNfsResponse$MountOptions": "The mount options that DataSync uses to mount your NFS share.
", "FsxProtocolNfs$MountOptions": null, "UpdateLocationNfsRequest$MountOptions": null } @@ -1270,8 +1352,8 @@ "NfsSubdirectory": { "base": null, "refs": { - "CreateLocationNfsRequest$Subdirectory": "The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name
\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash,
or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
", - "UpdateLocationNfsRequest$Subdirectory": "The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name
\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash
, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
" + "CreateLocationNfsRequest$Subdirectory": "Specifies the subdirectory in the NFS file server that DataSync transfers to or from. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name
\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash,
or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
", + "UpdateLocationNfsRequest$Subdirectory": "Specifies the subdirectory in your NFS file system that DataSync uses to read from or write to during a transfer. The NFS path should be exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name
\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash
, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" } }, "NfsVersion": { @@ -1388,7 +1470,7 @@ "OnPremConfig": { "base": "A list of Amazon Resource Names (ARNs) of agents to use for a Network File System (NFS) location.
", "refs": { - "CreateLocationNfsRequest$OnPremConfig": "Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
", + "CreateLocationNfsRequest$OnPremConfig": "Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses to connect to your NFS file server.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
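Pulling together the NFS members described above (ServerHostname, Subdirectory, the OnPremConfig agent ARNs, and NfsMountOptions), a minimal aws-sdk-go (v1) sketch of creating an NFS transfer location could look like the following; the hostname, export path, and agent ARN are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := datasync.New(sess)

	// The subdirectory must be a path exported by the NFS server (see "showmount -e")
	// that the agents listed in OnPremConfig can mount without Kerberos authentication.
	out, err := svc.CreateLocationNfs(&datasync.CreateLocationNfsInput{
		ServerHostname: aws.String("nfs.example.com"), // placeholder NFS file server
		Subdirectory:   aws.String("/exports/data"),
		OnPremConfig: &datasync.OnPremConfig{
			AgentArns: []*string{aws.String("arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0")},
		},
		MountOptions: &datasync.NfsMountOptions{Version: aws.String("AUTOMATIC")},
		Tags: []*datasync.TagListEntry{
			{Key: aws.String("Name"), Value: aws.String("nfs-source")},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created location:", aws.StringValue(out.LocationArn))
}
```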
", "DescribeLocationNfsResponse$OnPremConfig": null, "UpdateLocationNfsRequest$OnPremConfig": null } @@ -1432,15 +1514,15 @@ "PLSecurityGroupArnList": { "base": null, "refs": { - "CreateAgentRequest$SecurityGroupArns": "Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint.
", - "PrivateLinkConfig$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are configured for the EC2 resource that hosts an agent activated in a VPC or an agent that has access to a VPC endpoint.
" + "CreateAgentRequest$SecurityGroupArns": "Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint. You can only specify one ARN.
", + "PrivateLinkConfig$SecurityGroupArns": "Specifies the Amazon Resource Names (ARN) of the security group that provides DataSync access to your VPC endpoint. You can only specify one ARN.
" } }, "PLSubnetArnList": { "base": null, "refs": { - "CreateAgentRequest$SubnetArns": "Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer.
", - "PrivateLinkConfig$SubnetArns": "The Amazon Resource Names (ARNs) of the subnets that are configured for an agent activated in a VPC or an agent that has access to a VPC endpoint.
" + "CreateAgentRequest$SubnetArns": "Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer. You can only specify one ARN.
", + "PrivateLinkConfig$SubnetArns": "Specifies the ARN of the subnet where your VPC endpoint is located. You can only specify one ARN.
" } }, "PhaseStatus": { @@ -1470,7 +1552,7 @@ } }, "PrivateLinkConfig": { - "base": "The VPC endpoint, subnet, and security group that an agent uses to access IP addresses in a VPC (Virtual Private Cloud).
", + "base": "Specifies how your DataSync agent connects to Amazon Web Services using a virtual private cloud (VPC) service endpoint. An agent that uses a VPC endpoint isn't accessible over the public internet.
", "refs": { "DescribeAgentResponse$PrivateLinkConfig": "The subnet and the security group that DataSync used to access a VPC endpoint.
" } @@ -1643,7 +1725,7 @@ "ServerHostname": { "base": null, "refs": { - "CreateLocationNfsRequest$ServerHostname": "The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.
Specifies the IP address or domain name of your NFS file server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
You must specify either an IP version 4 (IPv4) address or a Domain Name System (DNS)-compliant name.
Specifies the domain name or IP address of the object storage server. A DataSync agent uses this hostname to mount the object storage server in a network.
", "CreateLocationSmbRequest$ServerHostname": "Specifies the Domain Name Service (DNS) name or IP address of the SMB file server that your DataSync agent will mount.
You can't specify an IP version 6 (IPv6) address.
The Amazon Resource Name (ARN) of the task.
", "DeleteTaskRequest$TaskArn": "Specifies the Amazon Resource Name (ARN) of the task that you want to delete.
", - "DescribeTaskRequest$TaskArn": "The Amazon Resource Name (ARN) of the task to describe.
", + "DescribeTaskRequest$TaskArn": "Specifies the Amazon Resource Name (ARN) of the transfer task.
", "DescribeTaskResponse$TaskArn": "The Amazon Resource Name (ARN) of the task that was described.
", "ListTaskExecutionsRequest$TaskArn": "The Amazon Resource Name (ARN) of the task whose tasks you want to list.
", "StartTaskExecutionRequest$TaskArn": "Specifies the Amazon Resource Name (ARN) of the task that you want to start.
", @@ -1851,7 +1933,7 @@ "base": null, "refs": { "CancelTaskExecutionRequest$TaskExecutionArn": "The Amazon Resource Name (ARN) of the task execution to stop.
", - "DescribeTaskExecutionRequest$TaskExecutionArn": "The Amazon Resource Name (ARN) of the task that is being executed.
", + "DescribeTaskExecutionRequest$TaskExecutionArn": "Specifies the Amazon Resource Name (ARN) of the transfer task that's running.
", "DescribeTaskExecutionResponse$TaskExecutionArn": "The Amazon Resource Name (ARN) of the task execution that was described. TaskExecutionArn
is hierarchical and includes TaskArn
for the task that was executed.
For example, a TaskExecution
value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b
executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2
.
The Amazon Resource Name (ARN) of the task execution that is transferring files.
", "StartTaskExecutionResponse$TaskExecutionArn": "The ARN of the running task execution.
", @@ -1946,6 +2028,7 @@ "refs": { "DescribeAgentResponse$LastConnectionTime": "The time that the agent last connected to DataSync.
", "DescribeAgentResponse$CreationTime": "The time that the agent was activated (that is, created in your account).
", + "DescribeLocationAzureBlobResponse$CreationTime": "The time that your Azure Blob Storage transfer location was created.
", "DescribeLocationEfsResponse$CreationTime": "The time that the location was created.
", "DescribeLocationFsxLustreResponse$CreationTime": "The time that the FSx for Lustre location was created.
", "DescribeLocationFsxOntapResponse$CreationTime": "The time that the location was created.
", @@ -2009,6 +2092,16 @@ "refs": { } }, + "UpdateLocationAzureBlobRequest": { + "base": null, + "refs": { + } + }, + "UpdateLocationAzureBlobResponse": { + "base": null, + "refs": { + } + }, "UpdateLocationHdfsRequest": { "base": null, "refs": { @@ -2089,7 +2182,7 @@ "base": null, "refs": { "CreateAgentRequest$VpcEndpointId": "Specifies the ID of the VPC endpoint that you want your agent to connect to. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1
.
The VPC endpoint you use must include the DataSync service name (for example, com.amazonaws.us-east-2.datasync
).
The ID of the VPC endpoint that is configured for an agent. An agent that is configured with a VPC endpoint will not be accessible over the public internet.
" + "PrivateLinkConfig$VpcEndpointId": "Specifies the ID of the VPC endpoint that your agent connects to.
" } }, "long": { diff --git a/models/apis/dynamodb/2012-08-10/docs-2.json b/models/apis/dynamodb/2012-08-10/docs-2.json index 5fa8036a5bc..b4a23a7212a 100644 --- a/models/apis/dynamodb/2012-08-10/docs-2.json +++ b/models/apis/dynamodb/2012-08-10/docs-2.json @@ -42,7 +42,7 @@ "Query": "You must provide the name of the partition key attribute and a single value for that attribute. Query
returns all items with that partition key value. Optionally, you can provide a sort key attribute and use a comparison operator to refine the search results.
Use the KeyConditionExpression
parameter to provide a specific value for the partition key. The Query
operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query
operation by specifying a sort key value and a comparison operator in KeyConditionExpression
. To further refine the Query
results, you can optionally provide a FilterExpression
. A FilterExpression
determines which items within the results should be returned to you. All of the other results are discarded.
A Query
operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.
DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression
.
Query
results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward
parameter to false.
A single Query
operation will read up to the maximum number of items set (if using the Limit
parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression
. If LastEvaluatedKey
is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
FilterExpression
is applied after a Query
finishes, but before the results are returned. A FilterExpression
cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression
.
A Query
operation can return an empty result set and a LastEvaluatedKey
if all the items read for the page of results are filtered out.
You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead
parameter to true
and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead
when querying a global secondary index.
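A minimal aws-sdk-go (v1) sketch of the Query behavior described above: KeyConditionExpression selects the partition, FilterExpression trims items after the read (and cannot reference key attributes), and QueryPages follows LastEvaluatedKey across 1 MB pages. The table and attribute names are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := dynamodb.New(sess)

	input := &dynamodb.QueryInput{
		TableName:              aws.String("Orders"), // hypothetical table
		KeyConditionExpression: aws.String("CustomerId = :cid"),
		FilterExpression:       aws.String("OrderTotal > :min"), // applied after the key condition
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":cid": {S: aws.String("customer-123")},
			":min": {N: aws.String("100")},
		},
		ScanIndexForward: aws.Bool(false), // descending sort-key order
	}

	// QueryPages follows LastEvaluatedKey automatically until the result set is exhausted.
	var items []map[string]*dynamodb.AttributeValue
	err := svc.QueryPages(input, func(page *dynamodb.QueryOutput, lastPage bool) bool {
		items = append(items, page.Items...)
		return true // keep paging
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("matched items:", len(items))
}
```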
Creates a new table from an existing backup. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
You can call RestoreTableFromBackup
at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Restores the specified table to the specified point in time within EarliestRestorableDateTime
and LatestRestorableDateTime
. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
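A hedged aws-sdk-go (v1) sketch of a point-in-time restore as described above; the table names are hypothetical, and the settings listed above still have to be reapplied to the restored table.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := dynamodb.New(sess)

	// Restore to a specific second inside the PITR window; alternatively set
	// UseLatestRestorableTime to true and omit RestoreDateTime.
	out, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
		SourceTableName: aws.String("Orders"),          // hypothetical source table
		TargetTableName: aws.String("Orders-restored"), // must not already exist
		RestoreDateTime: aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		panic(err)
	}
	// Auto scaling policies, IAM policies, CloudWatch alarms, tags, stream settings,
	// TTL, and PITR settings are not carried over and must be reapplied manually.
	fmt.Println("restore status:", aws.StringValue(out.TableDescription.TableStatus))
}
```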
The Scan
operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression
operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey
value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.
A single Scan
operation reads up to the maximum number of items set (if using the Limit
parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression
. If LastEvaluatedKey
is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan
operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan
operation by providing the Segment
and TotalSegments
parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
Scan
uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan
begins, you can set the ConsistentRead
parameter to true
.
The Scan
operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression
operation.
If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey
value is also returned and the requestor can use the LastEvaluatedKey
to continue the scan in a subsequent operation. Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression
, a scan result can result in no items meeting the criteria and the Count
will result in zero. If you did not use a FilterExpression
in the scan request, then Count
is the same as ScannedCount
.
Count
and ScannedCount
only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table.
A single Scan
operation first reads up to the maximum number of items set (if using the Limit
parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression
is provided. If LastEvaluatedKey
is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan
operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan
operation by providing the Segment
and TotalSegments
parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
By default, a Scan
uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan
may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead
parameter to true. Strong consistency only relates to the consistency of the read at the item level.
DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead
parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested.
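A short aws-sdk-go (v1) sketch of the parallel Scan pattern described above, using Segment and TotalSegments, with ScanPages following LastEvaluatedKey for each worker. The table name is hypothetical; note that ConsistentRead gives item-level consistency only, not a snapshot of the table.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := dynamodb.New(sess)

	const totalSegments = 4
	counts := make([]int, totalSegments)
	var wg sync.WaitGroup

	for seg := 0; seg < totalSegments; seg++ {
		wg.Add(1)
		go func(segment int) {
			defer wg.Done()
			input := &dynamodb.ScanInput{
				TableName:      aws.String("Orders"), // hypothetical table
				Segment:        aws.Int64(int64(segment)),
				TotalSegments:  aws.Int64(totalSegments),
				ConsistentRead: aws.Bool(true), // item-level consistency only
			}
			// ScanPages follows LastEvaluatedKey for this segment.
			if err := svc.ScanPages(input, func(page *dynamodb.ScanOutput, lastPage bool) bool {
				counts[segment] += len(page.Items)
				return true
			}); err != nil {
				fmt.Println("segment", segment, "error:", err)
			}
		}(seg)
	}
	wg.Wait()
	fmt.Println("items per segment:", counts)
}
```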
Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
", "TransactGetItems": " TransactGetItems
is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and Region. A TransactGetItems
call can contain up to 100 TransactGetItem
objects, each of which contains a Get
structure that specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems
cannot retrieve items from tables in more than one Amazon Web Services account or Region. The aggregate size of the items in the transaction cannot exceed 4 MB.
DynamoDB rejects the entire TransactGetItems
request if any of the following is true:
A conflicting operation is in the process of updating an item to be read.
There is insufficient provisioned capacity for the transaction to be completed.
There is a user error, such as an invalid data format.
The aggregate size of the items in the transaction exceeded 4 MB.
TransactWriteItems
is a synchronous write operation that groups up to 100 action requests. These actions can target items in different tables, but not in different Amazon Web Services accounts or Regions, and no two actions can target the same item. For example, you cannot both ConditionCheck
and Update
the same item. The aggregate size of the items in the transaction cannot exceed 4 MB.
The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:
Put
— Initiates a PutItem
operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether to retrieve the item's attributes if the condition is not met.
Update
— Initiates an UpdateItem
operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether to retrieve the item's attributes if the condition is not met.
Delete
— Initiates a DeleteItem
operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.
ConditionCheck
— Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.
DynamoDB rejects the entire TransactWriteItems
request if any of the following is true:
A condition in one of the condition expressions is not met.
An ongoing operation is in the process of updating the same item.
There is insufficient provisioned capacity for the transaction to be completed.
An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.
The aggregate size of the items in the transaction exceeds 4 MB.
There is a user error, such as an invalid data format.
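A minimal aws-sdk-go (v1) sketch of a TransactWriteItems call grouping a conditional Put with an Update, matching the description above; table, key, and attribute names are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := dynamodb.New(sess)

	// Both actions commit atomically or neither does; no two actions may target the same item.
	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{
			{
				Put: &dynamodb.Put{
					TableName: aws.String("Orders"), // hypothetical tables and items
					Item: map[string]*dynamodb.AttributeValue{
						"OrderId": {S: aws.String("order-001")},
						"Status":  {S: aws.String("PLACED")},
					},
					ConditionExpression:                 aws.String("attribute_not_exists(OrderId)"),
					ReturnValuesOnConditionCheckFailure: aws.String("ALL_OLD"),
				},
			},
			{
				Update: &dynamodb.Update{
					TableName:        aws.String("Customers"),
					Key:              map[string]*dynamodb.AttributeValue{"CustomerId": {S: aws.String("customer-123")}},
					UpdateExpression: aws.String("SET OrderCount = OrderCount + :one"),
					ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
						":one": {N: aws.String("1")},
					},
				},
			},
		},
	})
	if err != nil {
		panic(err) // a TransactionCanceledException carries per-action cancellation reasons
	}
	fmt.Println("transaction committed")
}
```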
The response to each PartiQL statement in the batch.
" + "BatchExecuteStatementOutput$Responses": "The response to each PartiQL statement in the batch. The values of the list are ordered according to the ordering of the request statements.
" } }, "PartiQLNextToken": { @@ -2505,7 +2505,7 @@ "ParameterizedStatement$ReturnValuesOnConditionCheckFailure": "An optional parameter that returns the item attributes for a PartiQL ParameterizedStatement
operation that failed a condition check.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
", "Put$ReturnValuesOnConditionCheckFailure": "Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the Put
condition fails. For ReturnValuesOnConditionCheckFailure
, the valid values are: NONE and ALL_OLD.
An optional parameter that returns the item attributes for a PutItem
operation that failed a condition check.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
", - "Update$ReturnValuesOnConditionCheckFailure": "Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the Update
condition fails. For ReturnValuesOnConditionCheckFailure
, the valid values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.
Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the Update
condition fails. For ReturnValuesOnConditionCheckFailure
, the valid values are: NONE and ALL_OLD.
An optional parameter that returns the item attributes for an UpdateItem
operation that failed a condition check.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
" } }, diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 78a1331a5ef..3e9207f613f 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -7676,6 +7676,7 @@ ] }, "BareMetalFlag":{"type":"boolean"}, + "BaselineBandwidthInGbps":{"type":"double"}, "BaselineBandwidthInMbps":{"type":"integer"}, "BaselineEbsBandwidthMbps":{ "type":"structure", @@ -25038,6 +25039,10 @@ "Accelerators":{ "shape":"InferenceDeviceInfoList", "locationName":"accelerators" + }, + "TotalInferenceMemoryInMiB":{ + "shape":"totalInferenceMemory", + "locationName":"totalInferenceMemoryInMiB" } } }, @@ -25056,6 +25061,10 @@ "Manufacturer":{ "shape":"InferenceDeviceManufacturerName", "locationName":"manufacturer" + }, + "MemoryInfo":{ + "shape":"InferenceDeviceMemoryInfo", + "locationName":"memoryInfo" } } }, @@ -25065,6 +25074,16 @@ "locationName":"item" }, "InferenceDeviceManufacturerName":{"type":"string"}, + "InferenceDeviceMemoryInfo":{ + "type":"structure", + "members":{ + "SizeInMiB":{ + "shape":"InferenceDeviceMemorySize", + "locationName":"sizeInMiB" + } + } + }, + "InferenceDeviceMemorySize":{"type":"integer"}, "InferenceDeviceName":{"type":"string"}, "InsideCidrBlocksStringList":{ "type":"list", @@ -32768,6 +32787,14 @@ "MaximumNetworkInterfaces":{ "shape":"MaxNetworkInterfaces", "locationName":"maximumNetworkInterfaces" + }, + "BaselineBandwidthInGbps":{ + "shape":"BaselineBandwidthInGbps", + "locationName":"baselineBandwidthInGbps" + }, + "PeakBandwidthInGbps":{ + "shape":"PeakBandwidthInGbps", + "locationName":"peakBandwidthInGbps" } } }, @@ -33953,6 +33980,7 @@ "SubsystemVendorId":{"shape":"String"} } }, + "PeakBandwidthInGbps":{"type":"double"}, "PeeringAttachmentStatus":{ "type":"structure", "members":{ @@ -45100,6 +45128,7 @@ } }, "totalFpgaMemory":{"type":"integer"}, - "totalGpuMemory":{"type":"integer"} + "totalGpuMemory":{"type":"integer"}, + "totalInferenceMemory":{"type":"integer"} } } diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 12ec3d31801..6863d8884e4 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -1791,6 +1791,12 @@ "InstanceTypeInfo$BareMetal": "Indicates whether the instance is a bare metal instance type.
" } }, + "BaselineBandwidthInGbps": { + "base": null, + "refs": { + "NetworkCardInfo$BaselineBandwidthInGbps": "The baseline network performance of the network card, in Gbps.
" + } + }, "BaselineBandwidthInMbps": { "base": null, "refs": { @@ -10708,6 +10714,18 @@ "InferenceDeviceInfo$Manufacturer": "The manufacturer of the Inference accelerator.
" } }, + "InferenceDeviceMemoryInfo": { + "base": "Describes the memory available to the inference accelerator.
", + "refs": { + "InferenceDeviceInfo$MemoryInfo": "Describes the memory available to the inference accelerator.
" + } + }, + "InferenceDeviceMemorySize": { + "base": null, + "refs": { + "InferenceDeviceMemoryInfo$SizeInMiB": "The size of the memory available to the inference accelerator, in MiB.
" + } + }, "InferenceDeviceName": { "base": null, "refs": { @@ -15403,6 +15421,12 @@ "FpgaImage$PciId": "Information about the PCI bus.
" } }, + "PeakBandwidthInGbps": { + "base": null, + "refs": { + "NetworkCardInfo$PeakBandwidthInGbps": "The peak (burst) network performance of the network card, in Gbps.
" + } + }, "PeeringAttachmentStatus": { "base": "The status of the transit gateway peering attachment.
", "refs": { @@ -23385,6 +23409,12 @@ "refs": { "GpuInfo$TotalGpuMemoryInMiB": "The total size of the memory for the GPU accelerators for the instance type, in MiB.
" } + }, + "totalInferenceMemory": { + "base": null, + "refs": { + "InferenceAcceleratorInfo$TotalInferenceMemoryInMiB": "The total size of the memory for the inference accelerators for the instance type, in MiB.
" + } } } } diff --git a/models/apis/emr-serverless/2021-07-13/api-2.json b/models/apis/emr-serverless/2021-07-13/api-2.json index 8ad509800eb..b281eeff262 100644 --- a/models/apis/emr-serverless/2021-07-13/api-2.json +++ b/models/apis/emr-serverless/2021-07-13/api-2.json @@ -409,6 +409,17 @@ "min":1, "pattern":"[A-Za-z0-9._-]+" }, + "CloudWatchLoggingConfiguration":{ + "type":"structure", + "required":["enabled"], + "members":{ + "enabled":{"shape":"Boolean"}, + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNamePrefix":{"shape":"LogStreamNamePrefix"}, + "encryptionKeyArn":{"shape":"EncryptionKeyArn"}, + "logTypes":{"shape":"LogTypeMap"} + } + }, "Configuration":{ "type":"structure", "required":["classification"], @@ -913,6 +924,37 @@ "tags":{"shape":"TagMap"} } }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LogStreamNamePrefix":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "LogTypeList":{ + "type":"list", + "member":{"shape":"LogTypeString"}, + "max":5, + "min":1 + }, + "LogTypeMap":{ + "type":"map", + "key":{"shape":"WorkerTypeString"}, + "value":{"shape":"LogTypeList"}, + "max":4, + "min":1 + }, + "LogTypeString":{ + "type":"string", + "max":50, + "min":1, + "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" + }, "ManagedPersistenceMonitoringConfiguration":{ "type":"structure", "members":{ @@ -942,7 +984,8 @@ "type":"structure", "members":{ "s3MonitoringConfiguration":{"shape":"S3MonitoringConfiguration"}, - "managedPersistenceMonitoringConfiguration":{"shape":"ManagedPersistenceMonitoringConfiguration"} + "managedPersistenceMonitoringConfiguration":{"shape":"ManagedPersistenceMonitoringConfiguration"}, + "cloudWatchLoggingConfiguration":{"shape":"CloudWatchLoggingConfiguration"} } }, "NetworkConfiguration":{ diff --git a/models/apis/emr-serverless/2021-07-13/docs-2.json b/models/apis/emr-serverless/2021-07-13/docs-2.json index bb953370c5e..8fbfc08bd11 100644 --- a/models/apis/emr-serverless/2021-07-13/docs-2.json +++ b/models/apis/emr-serverless/2021-07-13/docs-2.json @@ -6,7 +6,7 @@ "CreateApplication": "Creates an application.
", "DeleteApplication": "Deletes an application. An application has to be in a stopped or created state in order to be deleted.
", "GetApplication": "Displays detailed information about a specified application.
", - "GetDashboardForJobRun": "Returns a URL to access the job run dashboard. The generated URL is valid for one hour, after which you must invoke the API again to generate a new URL.
", + "GetDashboardForJobRun": "Creates and returns a URL that you can use to access the application UIs for a job run.
For jobs in a running state, the application UI is a live user interface such as the Spark or Tez web UI. For completed jobs, the application UI is a persistent application user interface such as the Spark History Server or persistent Tez UI.
The URL is valid for one hour after you generate it. To access the application UI after that hour elapses, you must invoke the API again to generate a new URL.
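A minimal aws-sdk-go (v1) sketch of GetDashboardForJobRun as described above; the application and job run IDs are placeholders, and the Url response field name is an assumption based on the service model.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emrserverless"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := emrserverless.New(sess)

	// The returned URL expires after one hour; call the API again for a fresh one.
	out, err := svc.GetDashboardForJobRun(&emrserverless.GetDashboardForJobRunInput{
		ApplicationId: aws.String("00f1abcdexample"), // placeholder IDs
		JobRunId:      aws.String("00f1jobrunexample"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("dashboard URL:", aws.StringValue(out.Url))
}
```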
Displays detailed information about a job run.
", "ListApplications": "Lists applications based on a set of parameters.
", "ListJobRuns": "Lists job runs based on a set of parameters.
", @@ -127,6 +127,7 @@ "refs": { "AutoStartConfig$enabled": "Enables the application to automatically start on job submission. Defaults to true.
", "AutoStopConfig$enabled": "Enables the application to automatically stop after a certain amount of time being idle. Defaults to true.
", + "CloudWatchLoggingConfiguration$enabled": "Enables CloudWatch logging.
", "ManagedPersistenceMonitoringConfiguration$enabled": "Enables managed logging and defaults to true. If set to false, managed logging will be turned off.
" } }, @@ -148,6 +149,12 @@ "UpdateApplicationRequest$clientToken": "The client idempotency token of the application to update. Its value must be unique for each request.
" } }, + "CloudWatchLoggingConfiguration": { + "base": "The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.
", + "refs": { + "MonitoringConfiguration$cloudWatchLoggingConfiguration": "The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.
" + } + }, "Configuration": { "base": "A configuration specification to be used when provisioning an application. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file.
", "refs": { @@ -243,6 +250,7 @@ "EncryptionKeyArn": { "base": null, "refs": { + "CloudWatchLoggingConfiguration$encryptionKeyArn": "The Key Management Service (KMS) key ARN to encrypt the logs that you store in CloudWatch Logs.
", "ManagedPersistenceMonitoringConfiguration$encryptionKeyArn": "The KMS key ARN to encrypt the logs stored in managed log persistence.
", "S3MonitoringConfiguration$encryptionKeyArn": "The KMS key ARN to encrypt the logs published to the given Amazon S3 destination.
" } @@ -489,6 +497,36 @@ "refs": { } }, + "LogGroupName": { + "base": null, + "refs": { + "CloudWatchLoggingConfiguration$logGroupName": "The name of the log group in Amazon CloudWatch Logs where you want to publish your logs.
" + } + }, + "LogStreamNamePrefix": { + "base": null, + "refs": { + "CloudWatchLoggingConfiguration$logStreamNamePrefix": "Prefix for the CloudWatch log stream name.
" + } + }, + "LogTypeList": { + "base": null, + "refs": { + "LogTypeMap$value": null + } + }, + "LogTypeMap": { + "base": null, + "refs": { + "CloudWatchLoggingConfiguration$logTypes": "The types of logs that you want to publish to CloudWatch. If you don't specify any log types, driver STDOUT and STDERR logs will be published to CloudWatch Logs by default. For more information including the supported worker types for Hive and Spark, see Logging for EMR Serverless with CloudWatch.
Key Valid Values: SPARK_DRIVER, SPARK_EXECUTOR, HIVE_DRIVER, TEZ_TASK
Array Members Valid Values: STDOUT, STDERR, HIVE_LOG, TEZ_AM, SYSTEM_LOGS
Log type for a Spark/Hive job-run.
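A hedged aws-sdk-go (v1) sketch that wires the new CloudWatchLoggingConfiguration into StartJobRun; the Go field names follow the model members added in this diff, while the application ID, role ARN, script path, and log group are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emrserverless"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := emrserverless.New(sess)

	out, err := svc.StartJobRun(&emrserverless.StartJobRunInput{
		ApplicationId:    aws.String("00f1abcdexample"), // placeholder IDs and ARNs
		ClientToken:      aws.String("unique-client-token-123"),
		ExecutionRoleArn: aws.String("arn:aws:iam::111122223333:role/emr-serverless-job-role"),
		JobDriver: &emrserverless.JobDriver{
			SparkSubmit: &emrserverless.SparkSubmit{
				EntryPoint: aws.String("s3://my-bucket/scripts/wordcount.py"),
			},
		},
		ConfigurationOverrides: &emrserverless.ConfigurationOverrides{
			MonitoringConfiguration: &emrserverless.MonitoringConfiguration{
				CloudWatchLoggingConfiguration: &emrserverless.CloudWatchLoggingConfiguration{
					Enabled:             aws.Bool(true),
					LogGroupName:        aws.String("/emr-serverless/jobs"),
					LogStreamNamePrefix: aws.String("wordcount"),
					// Omit LogTypes to ship driver STDOUT/STDERR by default.
					LogTypes: map[string][]*string{
						"SPARK_DRIVER": {aws.String("STDOUT"), aws.String("STDERR")},
					},
				},
			},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("started job run:", aws.StringValue(out.JobRunId))
}
```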
", + "refs": { + "LogTypeList$member": null + } + }, "ManagedPersistenceMonitoringConfiguration": { "base": "The managed log persistence configuration for a job run.
", "refs": { @@ -806,9 +844,10 @@ } }, "WorkerTypeString": { - "base": null, + "base": "Worker type for an analytics framework.
", "refs": { "InitialCapacityConfigMap$key": null, + "LogTypeMap$key": null, "WorkerTypeSpecificationInputMap$key": null, "WorkerTypeSpecificationMap$key": null } diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index e9af8569350..4cb1f3a02d8 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -3862,7 +3862,8 @@ "nodejs18.x", "python3.10", "java17", - "ruby3.2" + "ruby3.2", + "python3.11" ] }, "RuntimeVersionArn":{ diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 6c63feaecee..0f60386ff84 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -4219,7 +4219,8 @@ "DBSystemId":{"shape":"String"}, "MasterUserSecret":{"shape":"MasterUserSecret"}, "CertificateDetails":{"shape":"CertificateDetails"}, - "ReadReplicaSourceDBClusterIdentifier":{"shape":"String"} + "ReadReplicaSourceDBClusterIdentifier":{"shape":"String"}, + "PercentProgress":{"shape":"String"} }, "wrapper":true }, diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 95c5479111b..63fea1a58f5 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -30,7 +30,7 @@ "CreateDBSnapshot": "Creates a snapshot of a DB instance. The source DB instance must be in the available
or storage-optimization
state.
Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the Amazon Web Services Region.
", "CreateEventSubscription": "Creates an RDS event notification subscription. This operation requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType
) that you want to be notified of and provide a list of RDS sources (SourceIds
) that triggers the events. You can also provide a list of event categories (EventCategories
) for events that you want to be notified of. For example, you can specify SourceType
= db-instance
, SourceIds
= mydbinstance1
, mydbinstance2
and EventCategories
= Availability
, Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
and SourceIds
= myDBInstance1
, you are notified of all the db-instance
events for the specified source. If you specify a SourceType
but do not specify SourceIds
, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIds
, you are notified of events generated from all RDS sources belonging to your customer account.
For more information about subscribing to an event for RDS DB engines, see Subscribing to Amazon RDS event notification in the Amazon RDS User Guide.
For more information about subscribing to an event for Aurora DB engines, see Subscribing to Amazon RDS event notification in the Amazon Aurora User Guide.
", - "CreateGlobalCluster": "Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This operation applies only to Aurora DB clusters.
Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then create the primary and secondary DB clusters in the global database. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This operation applies only to Aurora DB clusters.
Creates a new option group. You can create up to 20 option groups.
This command doesn't apply to RDS Custom.
", "DeleteBlueGreenDeployment": "Deletes a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
", "DeleteCustomDBEngineVersion": "Deletes a custom engine version. To run this command, make sure you meet the following prerequisites:
The CEV must not be the default for RDS Custom. If it is, change the default before running this command.
The CEV must not be associated with an RDS Custom DB instance, RDS Custom instance snapshot, or automated backup of your RDS Custom instance.
Typically, deletion takes a few minutes.
The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the DeleteCustomDbEngineVersion
event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the DeleteCustomDbEngineVersion
event.
For more information, see Deleting a CEV in the Amazon RDS User Guide.
", @@ -110,7 +110,7 @@ "ModifyDBSnapshotAttribute": "Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.
To share a manual DB snapshot with other Amazon Web Services accounts, specify restore
as the AttributeName
and use the ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot. Uses the value all
to make the manual DB snapshot public, which means it can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB snapshots that contain private information that you don't want available to all Amazon Web Services accounts.
If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You can't use all
as a value for that parameter in this case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as values for the restore
attribute.
Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the Amazon Web Services Region.
", "ModifyEventSubscription": "Modifies an existing RDS event notification subscription. You can't modify the source identifiers using this call. To change source identifiers for a subscription, use the AddSourceIdentifierToSubscription
and RemoveSourceIdentifierFromSubscription
calls.
You can see a list of the event categories for a given source type (SourceType
) in Events in the Amazon RDS User Guide or by using the DescribeEventCategories
operation.
Modify a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Modifies a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This operation only applies to Aurora global database clusters.
Modifies an existing option group.
", "PromoteReadReplica": "Promotes a read replica DB instance to a standalone DB instance.
Backup duration is a function of the amount of changes to the database since the previous backup. If you plan to promote a read replica to a standalone instance, we recommend that you enable backups and complete at least one backup prior to promotion. In addition, a read replica cannot be promoted to a standalone instance when it is in the backing-up
status. If you have enabled backups on your read replica, configure the automated backup window so that daily backups do not interfere with read replica promotion.
This command doesn't apply to Aurora MySQL, Aurora PostgreSQL, or RDS Custom.
Promotes a read replica DB cluster to a standalone DB cluster.
", @@ -601,8 +601,8 @@ "ModifyDBProxyRequest$RequireTLS": "Whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy, even if the associated database doesn't use TLS.
", "ModifyDBProxyRequest$DebugLogging": "Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.
", "ModifyEventSubscriptionMessage$Enabled": "A value that indicates whether to activate the subscription.
", - "ModifyGlobalClusterMessage$DeletionProtection": "Indicates if the global database cluster has deletion protection enabled. The global database cluster can't be deleted when deletion protection is enabled.
", - "ModifyGlobalClusterMessage$AllowMajorVersionUpgrade": "A value that indicates whether major version upgrades are allowed.
Constraints: You must allow major version upgrades when specifying a value for the EngineVersion
parameter that is a different major version than the DB cluster's current version.
If you upgrade the major version of a global database, the cluster and DB instance parameter groups are set to the default parameter groups for the new version. Apply any custom parameter groups after completing the upgrade.
", + "ModifyGlobalClusterMessage$DeletionProtection": "Specifies whether to enable deletion protection for the global database cluster. The global database cluster can't be deleted when deletion protection is enabled.
", + "ModifyGlobalClusterMessage$AllowMajorVersionUpgrade": "Specifies whether to allow major version upgrades.
Constraints: Must be enabled if you specify a value for the EngineVersion
parameter that's a different major version than the global cluster's current version.
If you upgrade the major version of a global database, the cluster and DB instance parameter groups are set to the default parameter groups for the new version. Apply any custom parameter groups after completing the upgrade.
", "OptionGroupOption$SupportsOptionVersionDowngrade": "If true, you can change the option to an earlier version of the option. This only applies to options that have different versions available.
", "OptionGroupOption$CopyableCrossAccount": "Specifies whether the option can be copied across Amazon Web Services accounts.
", "OrderableDBInstanceOption$SupportsStorageAutoscaling": "Whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class.
", @@ -4235,6 +4235,7 @@ "DBInstance$NetworkType": "The network type of the DB instance.
The network type is determined by the DBSubnetGroup
specified for the DB instance. A DBSubnetGroup
can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL
).
For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide and Working with a DB instance in a VPC in the Amazon Aurora User Guide.
Valid Values: IPV4 | DUAL
The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle SID is also the name of the CDB. This setting is only valid for RDS Custom DB instances.
", "DBInstance$ReadReplicaSourceDBClusterIdentifier": "The identifier of the source DB cluster if this DB instance is a read replica.
", + "DBInstance$PercentProgress": "The progress of the storage optimization operation as a percentage.
", "DBInstanceAutomatedBackup$DBInstanceArn": "The Amazon Resource Name (ARN) for the automated backups.
", "DBInstanceAutomatedBackup$DbiResourceId": "The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.
", "DBInstanceAutomatedBackup$Region": "The Amazon Web Services Region associated with the automated backup.
", @@ -4572,7 +4573,7 @@ "ModifyDBInstanceMessage$StorageType": "The storage type to associate with the DB instance.
If you specify Provisioned IOPS (io1
), you must also include a value for the Iops
parameter.
If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.
Valid Values: gp2 | gp3 | io1 | standard
Default: io1
, if the Iops
parameter is specified. Otherwise, gp2
.
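A hedged aws-sdk-go sketch of the storage change described above; the identifier and IOPS value are illustrative, and Iops accompanies the io1 storage type as the parameter text requires.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := rds.New(sess)

	// Switch the instance to Provisioned IOPS storage; Iops is required with io1.
	out, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance"), // hypothetical identifier
		StorageType:          aws.String("io1"),
		Iops:                 aws.Int64(3000), // illustrative provisioned IOPS
		ApplyImmediately:     aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DBInstance.DBInstanceStatus))
}
```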
The ARN from the key store with which to associate the instance for TDE encryption.
This setting doesn't apply to RDS Custom DB instances.
", "ModifyDBInstanceMessage$TdeCredentialPassword": "The password for the given ARN from the key store in order to access the device.
This setting doesn't apply to RDS Custom DB instances.
", - "ModifyDBInstanceMessage$CACertificateIdentifier": "The CA certificate identifier to use for the DB instance6's server certificate.
This setting doesn't apply to RDS Custom DB instances.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
", + "ModifyDBInstanceMessage$CACertificateIdentifier": "The CA certificate identifier to use for the DB instance's server certificate.
This setting doesn't apply to RDS Custom DB instances.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
", "ModifyDBInstanceMessage$Domain": "The Active Directory directory ID to move the DB instance to. Specify none
to remove the instance from its current domain. You must create the domain before this operation. Currently, you can create only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
", "ModifyDBInstanceMessage$DomainFqdn": "The fully qualified domain name (FQDN) of an Active Directory domain.
Constraints:
Can't be longer than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Can't be longer than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The name of the RDS event notification subscription.
", "ModifyEventSubscriptionMessage$SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
", "ModifyEventSubscriptionMessage$SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy
. If this value isn't specified, all events are returned.
Valid values: db-instance
| db-cluster
| db-parameter-group
| db-security-group
| db-snapshot
| db-cluster-snapshot
| db-proxy
The DB cluster identifier for the global cluster being modified. This parameter isn't case-sensitive.
Constraints:
Must match the identifier of an existing global database cluster.
The new cluster identifier for the global database cluster when modifying a global database cluster. This value is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens
The first character must be a letter
Can't end with a hyphen or contain two consecutive hyphens
Example: my-cluster2
The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately
is enabled.
To list all of the available engine versions for aurora-mysql
(for MySQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
To list all of the available engine versions for aurora-postgresql
(for PostgreSQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
The cluster identifier for the global cluster to modify. This parameter isn't case-sensitive.
Constraints:
Must match the identifier of an existing global database cluster.
The new cluster identifier for the global database cluster. This value is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens.
The first character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: my-cluster2
The version number of the database engine to which you want to upgrade.
To list all of the available engine versions for aurora-mysql
(for MySQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
To list all of the available engine versions for aurora-postgresql
(for PostgreSQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
The name of the option group to be modified.
Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance.
", "Option$OptionName": "The name of the option.
", "Option$OptionDescription": "The description of the option.
", diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 5fc388501e2..bf42c179169 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -19532,6 +19532,10 @@ }, "TextClassificationJobConfig":{ "type":"structure", + "required":[ + "ContentColumn", + "TargetLabelColumn" + ], "members":{ "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, "ContentColumn":{"shape":"ContentColumn"}, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 3f91fe12a3c..52ed923c577 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -2185,7 +2185,7 @@ "ContentColumn": { "base": null, "refs": { - "TextClassificationJobConfig$ContentColumn": "The name of the column used to provide the sentences to be classified. It should not be the same as the target column (Required).
" + "TextClassificationJobConfig$ContentColumn": "The name of the column used to provide the sentences to be classified. It should not be the same as the target column.
" } }, "ContentDigest": { @@ -13291,7 +13291,7 @@ "TargetLabelColumn": { "base": null, "refs": { - "TextClassificationJobConfig$TargetLabelColumn": "The name of the column used to provide the class labels. It should not be same as the content column (Required).
" + "TextClassificationJobConfig$TargetLabelColumn": "The name of the column used to provide the class labels. It should not be same as the content column.
" } }, "TargetObjectiveMetricValue": { diff --git a/models/apis/securityhub/2018-10-26/api-2.json b/models/apis/securityhub/2018-10-26/api-2.json index 92f9a1351b7..663b90f7ab2 100644 --- a/models/apis/securityhub/2018-10-26/api-2.json +++ b/models/apis/securityhub/2018-10-26/api-2.json @@ -1669,6 +1669,34 @@ "UserPoolId":{"shape":"NonEmptyString"} } }, + "AwsAthenaWorkGroupConfigurationDetails":{ + "type":"structure", + "members":{ + "ResultConfiguration":{"shape":"AwsAthenaWorkGroupConfigurationResultConfigurationDetails"} + } + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationDetails":{ + "type":"structure", + "members":{ + "EncryptionConfiguration":{"shape":"AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails"} + } + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails":{ + "type":"structure", + "members":{ + "EncryptionOption":{"shape":"NonEmptyString"}, + "KmsKey":{"shape":"NonEmptyString"} + } + }, + "AwsAthenaWorkGroupDetails":{ + "type":"structure", + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Description":{"shape":"NonEmptyString"}, + "State":{"shape":"NonEmptyString"}, + "Configuration":{"shape":"AwsAthenaWorkGroupConfigurationDetails"} + } + }, "AwsAutoScalingAutoScalingGroupAvailabilityZonesList":{ "type":"list", "member":{"shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails"} @@ -5021,6 +5049,17 @@ "type":"list", "member":{"shape":"AwsRdsDbClusterOptionGroupMembership"} }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"NonEmptyString"}, + "AttributeValues":{"shape":"NonEmptyStringList"} + } + }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes":{ + "type":"list", + "member":{"shape":"AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute"} + }, "AwsRdsDbClusterSnapshotDetails":{ "type":"structure", "members":{ @@ -5041,7 +5080,8 @@ "KmsKeyId":{"shape":"NonEmptyString"}, "DbClusterIdentifier":{"shape":"NonEmptyString"}, "DbClusterSnapshotIdentifier":{"shape":"NonEmptyString"}, - "IamDatabaseAuthenticationEnabled":{"shape":"Boolean"} + "IamDatabaseAuthenticationEnabled":{"shape":"Boolean"}, + "DbClusterSnapshotAttributes":{"shape":"AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes"} } }, "AwsRdsDbDomainMembership":{ @@ -8073,7 +8113,9 @@ "type":"string", "enum":[ "EQUALS", - "NOT_EQUALS" + "NOT_EQUALS", + "CONTAINS", + "NOT_CONTAINS" ] }, "MapFilterList":{ @@ -8536,7 +8578,8 @@ "AwsAppSyncGraphQlApi":{"shape":"AwsAppSyncGraphQlApiDetails"}, "AwsEventSchemasRegistry":{"shape":"AwsEventSchemasRegistryDetails"}, "AwsGuardDutyDetector":{"shape":"AwsGuardDutyDetectorDetails"}, - "AwsStepFunctionStateMachine":{"shape":"AwsStepFunctionStateMachineDetails"} + "AwsStepFunctionStateMachine":{"shape":"AwsStepFunctionStateMachineDetails"}, + "AwsAthenaWorkGroup":{"shape":"AwsAthenaWorkGroupDetails"} } }, "ResourceList":{ @@ -9178,7 +9221,9 @@ "EQUALS", "PREFIX", "NOT_EQUALS", - "PREFIX_NOT_EQUALS" + "PREFIX_NOT_EQUALS", + "CONTAINS", + "NOT_CONTAINS" ] }, "StringFilterList":{ diff --git a/models/apis/securityhub/2018-10-26/docs-2.json b/models/apis/securityhub/2018-10-26/docs-2.json index 9aa66ceb135..63b242d9003 100644 --- a/models/apis/securityhub/2018-10-26/docs-2.json +++ b/models/apis/securityhub/2018-10-26/docs-2.json @@ -507,6 +507,30 @@ "AwsAppSyncGraphQlApiDetails$UserPoolConfig": "The Amazon Cognito user pools configuration.
" } }, + "AwsAthenaWorkGroupConfigurationDetails": { + "base": "The configuration of the workgroup, which includes the location in Amazon Simple Storage Service (Amazon S3) where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified.
", + "refs": { + "AwsAthenaWorkGroupDetails$Configuration": "The configuration of the workgroup, which includes the location in Amazon Simple Storage Service (Amazon S3) where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified.
" + } + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationDetails": { + "base": "The location in Amazon Simple Storage Service (Amazon S3) where query and calculation results are stored and the encryption option, if any, used for query and calculation results. These are known as client-side settings. If workgroup settings override client-side settings, then the query uses the workgroup settings.
", + "refs": { + "AwsAthenaWorkGroupConfigurationDetails$ResultConfiguration": "The location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. These are known as client-side settings. If workgroup settings override client-side settings, then the query uses the workgroup settings.
" + } + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails": { + "base": "Specifies the method used to encrypt the user’s data stores in the Athena workgroup.
", + "refs": { + "AwsAthenaWorkGroupConfigurationResultConfigurationDetails$EncryptionConfiguration": "Specifies the method used to encrypt the user’s data stores in the Athena workgroup.
" + } + }, + "AwsAthenaWorkGroupDetails": { + "base": "Provides information about an Amazon Athena workgroup.
", + "refs": { + "ResourceDetails$AwsAthenaWorkGroup": "Provides information about an Amazon Athena workgroup. A workgroup helps you separate users, teams, applications, or workloads. It also helps you set limits on data processing and track costs.
" + } + }, "AwsAutoScalingAutoScalingGroupAvailabilityZonesList": { "base": null, "refs": { @@ -2980,6 +3004,18 @@ "AwsRdsDbClusterDetails$DbClusterOptionGroupMemberships": "The list of option group memberships for this DB cluster.
" } }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute": { + "base": "Contains the name and values of a manual Amazon Relational Database Service (RDS) DB cluster snapshot attribute.
", + "refs": { + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes$member": null + } + }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes": { + "base": null, + "refs": { + "AwsRdsDbClusterSnapshotDetails$DbClusterSnapshotAttributes": "Contains the name and values of a manual DB cluster snapshot attribute.
" + } + }, "AwsRdsDbClusterSnapshotDetails": { "base": "Information about an Amazon RDS DB cluster snapshot.
", "refs": { @@ -4097,8 +4133,8 @@ "base": null, "refs": { "AssociationSetDetails$Main": "Indicates whether this is the main route table.
", - "AutomationRulesConfig$IsTerminal": "Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true
for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
The default value of this field is false
.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true
for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
The default value of this field is false
.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
", + "AutomationRulesMetadata$IsTerminal": "Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
", "AwsAmazonMqBrokerDetails$AutoMinorVersionUpgrade": "Whether automatically upgrade new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot.
", "AwsAmazonMqBrokerDetails$PubliclyAccessible": "Permits connections from applications outside of the VPC that hosts the broker's subnets.
", "AwsAmazonMqBrokerEncryptionOptionsDetails$UseAwsOwnedKey": " Specifies that an KMS key should be used for at-rest encryption. Set to true
by default if no value is provided (for example, for RabbitMQ brokers).
The value of the boolean.
", "ClassificationResult$AdditionalOccurrences": "Indicates whether there are additional occurrences of sensitive data that are not included in the finding. This occurs when the number of occurrences exceeds the maximum that can be included.
", "ContainerDetails$Privileged": "When this parameter is true
, the container is given elevated privileges on the host container instance (similar to the root user).
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true
for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. The default value of this field is false
.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
", "DescribeHubResponse$AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.
If set to true
, then new controls for enabled standards are enabled automatically. If set to false
, then new controls are not enabled.
Whether to automatically enable Security Hub for new accounts in the organization.
If set to true
, then Security Hub is enabled for new accounts. If set to false, then new accounts are not added automatically.
Whether the maximum number of allowed member accounts are already associated with the Security Hub administrator account.
", @@ -4293,7 +4329,7 @@ "NetworkConnectionAction$Blocked": "Indicates whether the network connection attempt was blocked.
", "PortProbeAction$Blocked": "Indicates whether the port probe was blocked.
", "Standard$EnabledByDefault": "Whether the standard is enabled by default. When Security Hub is enabled from the console, if a standard is enabled by default, the check box for that standard is selected by default.
When Security Hub is enabled using the EnableSecurityHub
API operation, the standard is enabled by default unless EnableDefaultStandards
is set to false
.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true
for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
The default value of this field is false
.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
", "UpdateOrganizationConfigurationRequest$AutoEnable": "Whether to automatically enable Security Hub for new accounts in the organization.
By default, this is false
, and new accounts are not added automatically.
To automatically enable Security Hub for new accounts, set this to true
.
Whether to automatically enable new controls when they are added to standards that are enabled.
By default, this is set to true
, and new controls are enabled automatically. To not automatically enable new controls, set this to false
.
Indicates whether a local VPC can resolve public DNS hostnames to private IP addresses when queried from instances in a peer VPC.
", @@ -5569,7 +5605,7 @@ } }, "MapFilter": { - "base": "A map filter for querying findings. Each map filter provides the field to check, the value to look for, and the comparison operator.
", + "base": "A map filter for filtering Security Hub findings. Each map filter provides the field to check for, the value to check for, and the comparison operator.
", "refs": { "MapFilterList$member": null } @@ -5577,7 +5613,7 @@ "MapFilterComparison": { "base": null, "refs": { - "MapFilter$Comparison": "The condition to apply to the key value when querying for findings with a map filter.
To search for values that exactly match the filter value, use EQUALS
. For example, for the ResourceTags
field, the filter Department EQUALS Security
matches findings that have the value Security
for the tag Department
.
To search for values other than the filter value, use NOT_EQUALS
. For example, for the ResourceTags
field, the filter Department NOT_EQUALS Finance
matches findings that do not have the value Finance
for the tag Department
.
EQUALS
filters on the same field are joined by OR
. A finding matches if it matches any one of those filters.
NOT_EQUALS
filters on the same field are joined by AND
. A finding matches only if it matches all of those filters.
You cannot have both an EQUALS
filter and a NOT_EQUALS
filter on the same field.
The condition to apply to the key value when filtering Security Hub findings with a map filter.
To search for values that have the filter value, use one of the following comparison operators:
To search for values that include the filter value, use CONTAINS
. For example, for the ResourceTags
field, the filter Department CONTAINS Security
matches findings that include the value Security
for the Department
tag. In the same example, a finding with a value of Security team
for the Department
tag is a match.
To search for values that exactly match the filter value, use EQUALS
. For example, for the ResourceTags
field, the filter Department EQUALS Security
matches findings that have the value Security
for the Department
tag.
CONTAINS
and EQUALS
filters on the same field are joined by OR
. A finding matches if it matches any one of those filters. For example, the filters Department CONTAINS Security OR Department CONTAINS Finance
match a finding that includes either Security
, Finance
, or both values.
To search for values that don't have the filter value, use one of the following comparison operators:
To search for values that exclude the filter value, use NOT_CONTAINS
. For example, for the ResourceTags
field, the filter Department NOT_CONTAINS Finance
matches findings that exclude the value Finance
for the Department
tag.
To search for values other than the filter value, use NOT_EQUALS
. For example, for the ResourceTags
field, the filter Department NOT_EQUALS Finance
matches findings that don’t have the value Finance
for the Department
tag.
NOT_CONTAINS
and NOT_EQUALS
filters on the same field are joined by AND
. A finding matches only if it matches all of those filters. For example, the filters Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance
match a finding that excludes both the Security
and Finance
values.
CONTAINS
filters can only be used with other CONTAINS
filters. NOT_CONTAINS
filters can only be used with other NOT_CONTAINS
filters.
You can’t have both a CONTAINS
filter and a NOT_CONTAINS
filter on the same field. Similarly, you can’t have both an EQUALS
filter and a NOT_EQUALS
filter on the same field. Combining filters in this way returns an error.
CONTAINS
and NOT_CONTAINS
operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.
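A sketch of the CONTAINS comparison on a map filter, assuming the generated aws-sdk-go types; the tag key and value are illustrative, and the filter is intended for automation rule criteria.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Matches findings whose Department tag includes "Security", for example
	// "Security" or "Security team". CONTAINS is accepted only in automation
	// rule criteria.
	tagFilter := &securityhub.MapFilter{
		Key:        aws.String("Department"),
		Value:      aws.String("Security"),
		Comparison: aws.String("CONTAINS"),
	}
	fmt.Println(tagFilter)
}
```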
The Amazon Web Services Region in which the user pool was created.
", "AwsAppSyncGraphQlApiUserPoolConfigDetails$DefaultAction": "The action that you want your GraphQL API to take when a request that uses Amazon Cognito user pools authentication doesn't match the Amazon Cognito user pools configuration.
", "AwsAppSyncGraphQlApiUserPoolConfigDetails$UserPoolId": "The user pool ID.
", + "AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails$EncryptionOption": "Indicates whether Amazon Simple Storage Service (Amazon S3) server-side encryption with Amazon S3 managed keys (SSE_S3), server-side encryption with KMS keys (SSE_KMS), or client-side encryption with KMS customer managed keys (CSE_KMS) is used.
", + "AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails$KmsKey": " For SSE_KMS
and CSE_KMS
, this is the KMS key Amazon Resource Name (ARN) or ID.
The workgroup name.
", + "AwsAthenaWorkGroupDetails$Description": "The workgroup description.
", + "AwsAthenaWorkGroupDetails$State": "Whether the workgroup is enabled or disabled.
", "AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails$Value": "The name of the Availability Zone.
", "AwsAutoScalingAutoScalingGroupDetails$LaunchConfigurationName": "The name of the launch configuration.
", "AwsAutoScalingAutoScalingGroupDetails$HealthCheckType": "The service to use for the health checks. Valid values are EC2
or ELB
.
The status of the DB cluster parameter group for this member of the DB cluster.
", "AwsRdsDbClusterOptionGroupMembership$DbClusterOptionGroupName": "The name of the DB cluster option group.
", "AwsRdsDbClusterOptionGroupMembership$Status": "The status of the DB cluster option group.
", + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute$AttributeName": " The name of the manual DB cluster snapshot attribute. The attribute named restore
refers to the list of Amazon Web Services accounts that have permission to copy or restore the manual DB cluster snapshot.
Indicates when the snapshot was taken.
Uses the date-time
format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces, and date and time should be separated by T
. For example, 2020-03-22T13:22:13.933Z
.
The name of the database engine that you want to use for this DB instance.
", "AwsRdsDbClusterSnapshotDetails$Status": "The status of this DB cluster snapshot.
", @@ -7054,7 +7096,7 @@ "Malware$Name": "The name of the malware that was observed.
", "Malware$Path": "The file system path of the malware that was observed.
", "MapFilter$Key": "The key of the map filter. For example, for ResourceTags
, Key
identifies the name of the tag. For UserDefinedFields
, Key
is the name of the field.
The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called Department
might be Security
. If you provide security
as the filter value, then there is no match.
The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called Department
might be Security
. If you provide security
as the filter value, then there's no match.
The email address of the member account.
", "Member$MasterId": "This is replaced by AdministratorID
.
The Amazon Web Services account ID of the Security Hub administrator account associated with this member account.
", "Member$AdministratorId": "The Amazon Web Services account ID of the Security Hub administrator account associated with this member account.
", @@ -7204,7 +7246,7 @@ "StatelessCustomPublishMetricActionDimension$Value": "The value to use for the custom metric dimension.
", "StatusReason$ReasonCode": "A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the Security Hub User Guide.
", "StatusReason$Description": "The corresponding description for the status reason code.
", - "StringFilter$Value": "The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub
. If you provide security hub
as the filter text, then there is no match.
The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub
. If you provide security hub
as the filter value, there's no match.
The name of the threat.
", "Threat$Severity": "The severity of the threat.
", @@ -7319,6 +7361,7 @@ "AwsLambdaLayerVersionDetails$CompatibleRuntimes": "The layer's compatible runtimes. Maximum number of five items.
Valid values: nodejs10.x
| nodejs12.x
| java8
| java11
| python2.7
| python3.6
| python3.7
| python3.8
| dotnetcore1.0
| dotnetcore2.1
| go1.x
| ruby2.5
| provided
The list of security group IDs that are associated with the VPC endpoints for the domain.
", "AwsOpenSearchServiceDomainVpcOptionsDetails$SubnetIds": "A list of subnet IDs that are associated with the VPC endpoints for the domain.
", + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute$AttributeValues": " The value(s) for the manual DB cluster snapshot attribute. If the AttributeName
field is set to restore
, then this element returns a list of IDs of the Amazon Web Services accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all
is in the list, then the manual DB cluster snapshot is public and available for any Amazon Web Services account to copy or restore.
The list of event categories for the event notification subscription.
", "AwsRdsEventSubscriptionDetails$SourceIdsList": "A list of source identifiers for the event notification subscription.
", "AwsSageMakerNotebookInstanceDetails$AcceleratorTypes": "A list of Amazon Elastic Inference instance types to associate with the notebook instance. Currently, only one instance type can be associated with a notebook instance.
", @@ -8129,7 +8172,7 @@ } }, "StringFilter": { - "base": "A string filter for querying findings.
", + "base": "A string filter for filtering Security Hub findings.
", "refs": { "StringFilterList$member": null } @@ -8137,7 +8180,7 @@ "StringFilterComparison": { "base": null, "refs": { - "StringFilter$Comparison": "The condition to apply to a string value when querying for findings. To search for values that contain the filter criteria value, use one of the following comparison operators:
To search for values that exactly match the filter value, use EQUALS
.
For example, the filter ResourceType EQUALS AwsEc2SecurityGroup
only matches findings that have a resource type of AwsEc2SecurityGroup
.
To search for values that start with the filter value, use PREFIX
.
For example, the filter ResourceType PREFIX AwsIam
matches findings that have a resource type that starts with AwsIam
. Findings with a resource type of AwsIamPolicy
, AwsIamRole
, or AwsIamUser
would all match.
EQUALS
and PREFIX
filters on the same field are joined by OR
. A finding matches if it matches any one of those filters.
To search for values that do not contain the filter criteria value, use one of the following comparison operators:
To search for values that do not exactly match the filter value, use NOT_EQUALS
.
For example, the filter ResourceType NOT_EQUALS AwsIamPolicy
matches findings that have a resource type other than AwsIamPolicy
.
To search for values that do not start with the filter value, use PREFIX_NOT_EQUALS
.
For example, the filter ResourceType PREFIX_NOT_EQUALS AwsIam
matches findings that have a resource type that does not start with AwsIam
. Findings with a resource type of AwsIamPolicy
, AwsIamRole
, or AwsIamUser
would all be excluded from the results.
NOT_EQUALS
and PREFIX_NOT_EQUALS
filters on the same field are joined by AND
. A finding matches only if it matches all of those filters.
For filters on the same field, you cannot provide both an EQUALS
filter and a NOT_EQUALS
or PREFIX_NOT_EQUALS
filter. Combining filters in this way always returns an error, even if the provided filter values would return valid results.
You can combine PREFIX
filters with NOT_EQUALS
or PREFIX_NOT_EQUALS
filters for the same field. Security Hub first processes the PREFIX
filters, then the NOT_EQUALS
or PREFIX_NOT_EQUALS
filters.
For example, for the following filter, Security Hub first identifies findings that have resource types that start with either AwsIAM
or AwsEc2
. It then excludes findings that have a resource type of AwsIamPolicy
and findings that have a resource type of AwsEc2NetworkInterface
.
ResourceType PREFIX AwsIam
ResourceType PREFIX AwsEc2
ResourceType NOT_EQUALS AwsIamPolicy
ResourceType NOT_EQUALS AwsEc2NetworkInterface
The condition to apply to a string value when filtering Security Hub findings.
To search for values that have the filter value, use one of the following comparison operators:
To search for values that include the filter value, use CONTAINS
. For example, the filter Title CONTAINS CloudFront
matches findings that have a Title
that includes the string CloudFront.
To search for values that exactly match the filter value, use EQUALS
. For example, the filter AwsAccountId EQUALS 123456789012
only matches findings that have an account ID of 123456789012
.
To search for values that start with the filter value, use PREFIX
. For example, the filter ResourceRegion PREFIX us
matches findings that have a ResourceRegion
that starts with us
. A ResourceRegion
that starts with a different value, such as af
, ap
, or ca
, doesn't match.
CONTAINS
, EQUALS
, and PREFIX
filters on the same field are joined by OR
. A finding matches if it matches any one of those filters. For example, the filters Title CONTAINS CloudFront OR Title CONTAINS CloudWatch
match a finding that includes either CloudFront
, CloudWatch
, or both strings in the title.
To search for values that don’t have the filter value, use one of the following comparison operators:
To search for values that exclude the filter value, use NOT_CONTAINS
. For example, the filter Title NOT_CONTAINS CloudFront
matches findings that have a Title
that excludes the string CloudFront.
To search for values other than the filter value, use NOT_EQUALS
. For example, the filter AwsAccountId NOT_EQUALS 123456789012
only matches findings that have an account ID other than 123456789012
.
To search for values that don't start with the filter value, use PREFIX_NOT_EQUALS
. For example, the filter ResourceRegion PREFIX_NOT_EQUALS us
matches findings with a ResourceRegion
that starts with a value other than us
.
NOT_CONTAINS
, NOT_EQUALS
, and PREFIX_NOT_EQUALS
filters on the same field are joined by AND
. A finding matches only if it matches all of those filters. For example, the filters Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch
match a finding that excludes both CloudFront
and CloudWatch
in the title.
You can’t have both a CONTAINS
filter and a NOT_CONTAINS
filter on the same field. Similarly, you can't provide both an EQUALS
filter and a NOT_EQUALS
or PREFIX_NOT_EQUALS
filter on the same field. Combining filters in this way returns an error. CONTAINS
filters can only be used with other CONTAINS
filters. NOT_CONTAINS
filters can only be used with other NOT_CONTAINS
filters.
You can combine PREFIX
filters with NOT_EQUALS
or PREFIX_NOT_EQUALS
filters for the same field. Security Hub first processes the PREFIX
filters, and then the NOT_EQUALS
or PREFIX_NOT_EQUALS
filters.
For example, for the following filters, Security Hub first identifies findings that have resource types that start with either AwsIam
or AwsEc2
. It then excludes findings that have a resource type of AwsIamPolicy
and findings that have a resource type of AwsEc2NetworkInterface
.
ResourceType PREFIX AwsIam
ResourceType PREFIX AwsEc2
ResourceType NOT_EQUALS AwsIamPolicy
ResourceType NOT_EQUALS AwsEc2NetworkInterface
CONTAINS
and NOT_CONTAINS
operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.
Reserved for future use.
", + "refs": { + "ProvidedContextsListType$member": null + } + }, + "ProvidedContextsListType": { + "base": null, + "refs": { + "AssumeRoleRequest$ProvidedContexts": "Reserved for future use.
" + } + }, "RegionDisabledException": { "base": "STS is not activated in the requested region for the account that is being asked to generate credentials. The account administrator must use the IAM console to activate STS in that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the IAM User Guide.
", "refs": { @@ -236,7 +248,8 @@ "AssumedRoleUser$Arn": "The ARN of the temporary security credentials that are returned from the AssumeRole action. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.
", "FederatedUser$Arn": "The ARN that specifies the federated user that is associated with the credentials. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.
", "GetCallerIdentityResponse$Arn": "The Amazon Web Services ARN associated with the calling entity.
", - "PolicyDescriptorType$arn": "The Amazon Resource Name (ARN) of the IAM managed policy to use as a session policy for the role. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.
" + "PolicyDescriptorType$arn": "The Amazon Resource Name (ARN) of the IAM managed policy to use as a session policy for the role. For more information about ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.
", + "ProvidedContext$ProviderArn": "Reserved for future use.
" } }, "assumedRoleIdType": { @@ -248,7 +261,13 @@ "clientTokenType": { "base": null, "refs": { - "AssumeRoleWithWebIdentityRequest$WebIdentityToken": "The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity
call.
The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity
call. Only tokens with RSA algorithms (RS256) are supported.
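A minimal aws-sdk-go sketch of passing such a token to AssumeRoleWithWebIdentity; the role ARN and token placeholder are hypothetical, and anonymous credentials are used because this call is not signed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// AssumeRoleWithWebIdentity does not require SigV4 signing.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: credentials.AnonymousCredentials,
	}))
	svc := sts.New(sess)

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // hypothetical role
		RoleSessionName:  aws.String("app-session"),
		WebIdentityToken: aws.String("eyJ...RS256-signed-ID-token-placeholder"), // from your identity provider
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}
```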
Reserved for future use.
" } }, "dateType": { diff --git a/models/apis/sts/2011-06-15/endpoint-tests-1.json b/models/apis/sts/2011-06-15/endpoint-tests-1.json index b566f4aac4a..5e12a28e224 100644 --- a/models/apis/sts/2011-06-15/endpoint-tests-1.json +++ b/models/apis/sts/2011-06-15/endpoint-tests-1.json @@ -702,9 +702,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -734,9 +734,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -766,9 +766,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -798,9 +798,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -830,9 +830,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -862,9 +862,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -894,9 +894,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -926,9 +926,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -958,9 +958,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -990,9 +990,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1022,9 +1022,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1054,9 +1054,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1086,9 +1086,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1118,9 +1118,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1150,9 +1150,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1182,9 +1182,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1214,9 +1214,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-3", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-3" } ] }, diff --git a/models/apis/transfer/2018-11-05/api-2.json b/models/apis/transfer/2018-11-05/api-2.json index 
2f696f014cc..8b6b457cdd2 100644 --- a/models/apis/transfer/2018-11-05/api-2.json +++ b/models/apis/transfer/2018-11-05/api-2.json @@ -754,6 +754,21 @@ {"shape":"ResourceNotFoundException"} ] }, + "TestConnection":{ + "name":"TestConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestConnectionRequest"}, + "output":{"shape":"TestConnectionResponse"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "TestIdentityProvider":{ "name":"TestIdentityProvider", "http":{ @@ -1139,7 +1154,6 @@ "type":"structure", "required":[ "Url", - "As2Config", "AccessRole" ], "members":{ @@ -1147,7 +1161,8 @@ "As2Config":{"shape":"As2ConnectorConfig"}, "AccessRole":{"shape":"Role"}, "LoggingRole":{"shape":"Role"}, - "Tags":{"shape":"Tags"} + "Tags":{"shape":"Tags"}, + "SftpConfig":{"shape":"SftpConnectorConfig"} } }, "CreateConnectorResponse":{ @@ -1637,7 +1652,8 @@ "As2Config":{"shape":"As2ConnectorConfig"}, "AccessRole":{"shape":"Role"}, "LoggingRole":{"shape":"Role"}, - "Tags":{"shape":"Tags"} + "Tags":{"shape":"Tags"}, + "SftpConfig":{"shape":"SftpConnectorConfig"} } }, "DescribedExecution":{ @@ -2703,6 +2719,11 @@ "max":16, "min":0 }, + "SecretId":{ + "type":"string", + "max":2048, + "min":1 + }, "SecurityGroupId":{ "type":"string", "max":20, @@ -2795,6 +2816,24 @@ "PUBLIC_KEY_AND_PASSWORD" ] }, + "SftpConnectorConfig":{ + "type":"structure", + "members":{ + "UserSecretId":{"shape":"SecretId"}, + "TrustedHostKeys":{"shape":"SftpConnectorTrustedHostKeyList"} + } + }, + "SftpConnectorTrustedHostKey":{ + "type":"string", + "max":2048, + "min":1 + }, + "SftpConnectorTrustedHostKeyList":{ + "type":"list", + "member":{"shape":"SftpConnectorTrustedHostKey"}, + "max":10, + "min":1 + }, "SigningAlg":{ "type":"string", "enum":[ @@ -2846,13 +2885,13 @@ }, "StartFileTransferRequest":{ "type":"structure", - "required":[ - "ConnectorId", - "SendFilePaths" - ], + "required":["ConnectorId"], "members":{ "ConnectorId":{"shape":"ConnectorId"}, - "SendFilePaths":{"shape":"FilePaths"} + "SendFilePaths":{"shape":"FilePaths"}, + "RetrieveFilePaths":{"shape":"FilePaths"}, + "LocalDirectoryPath":{"shape":"FilePath"}, + "RemoteDirectoryPath":{"shape":"FilePath"} } }, "StartFileTransferResponse":{ @@ -2880,6 +2919,7 @@ "STOP_FAILED" ] }, + "Status":{"type":"string"}, "StatusCode":{"type":"integer"}, "StepResultOutputsJson":{ "type":"string", @@ -2953,6 +2993,21 @@ "max":50, "min":1 }, + "TestConnectionRequest":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorId":{"shape":"ConnectorId"} + } + }, + "TestConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectorId":{"shape":"ConnectorId"}, + "Status":{"shape":"Status"}, + "StatusMessage":{"shape":"Message"} + } + }, "TestIdentityProviderRequest":{ "type":"structure", "required":[ @@ -3089,7 +3144,8 @@ "Url":{"shape":"Url"}, "As2Config":{"shape":"As2ConnectorConfig"}, "AccessRole":{"shape":"Role"}, - "LoggingRole":{"shape":"Role"} + "LoggingRole":{"shape":"Role"}, + "SftpConfig":{"shape":"SftpConnectorConfig"} } }, "UpdateConnectorResponse":{ diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 747d0c57fa8..dc9fe1b2c3e 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -4,7 +4,7 @@ "operations": { "CreateAccess": "Used by administrators to choose which groups in the 
directory should have access to upload and download files over the enabled protocols using Transfer Family. For example, a Microsoft Active Directory might contain 50,000 users, but only a small fraction might need the ability to transfer files to the server. An administrator can use CreateAccess
to limit the access to the correct set of users who need this ability.
Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between an Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes.
The partner is identified with the PartnerProfileId
, and the AS2 process is identified with the LocalProfileId
.
Creates the connector, which captures the parameters for an outbound connection for the AS2 protocol. The connector is required for sending files to an externally hosted AS2 server. For more details about connectors, see Create AS2 connectors.
", + "CreateConnector": "Creates the connector, which captures the parameters for an outbound connection for the AS2 or SFTP protocol. The connector is required for sending files to an externally hosted AS2 or SFTP server. For more details about AS2 connectors, see Create AS2 connectors.
You must specify exactly one configuration object: either for AS2 (As2Config
) or SFTP (SftpConfig
).
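A hedged aws-sdk-go sketch of creating an SFTP connector with the new SftpConfig object; the URL, role ARN, secret ARN, and host key are placeholders, and the generated type names are assumed to follow the usual SDK conventions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := transfer.New(sess)

	// Exactly one of As2Config or SftpConfig is set; this sketch uses SftpConfig.
	out, err := svc.CreateConnector(&transfer.CreateConnectorInput{
		Url:        aws.String("sftp://partner.example.com"),                        // hypothetical endpoint
		AccessRole: aws.String("arn:aws:iam::123456789012:role/transfer-connector"), // hypothetical role
		SftpConfig: &transfer.SftpConnectorConfig{
			UserSecretId:    aws.String("arn:aws:secretsmanager:us-east-1:123456789012:secret:sftp-user"), // hypothetical secret
			TrustedHostKeys: aws.StringSlice([]string{"ssh-rsa AAAAB3NzaC1yc2E-placeholder"}),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ConnectorId))
}
```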
Creates the local or partner profile to use for AS2 transfers.
", "CreateServer": "Instantiates an auto-scaling virtual server based on the selected file transfer protocol in Amazon Web Services. When you make updates to your file transfer protocol-enabled server or when you work with users, use the service-generated ServerId
property that is assigned to the newly created server.
Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the IdentityProviderType
set to SERVICE_MANAGED
. Using parameters for CreateUser
, you can specify the user name, set the home directory, store the user's public key, and assign the user's Identity and Access Management (IAM) role. You can also optionally add a session policy, and assign metadata with tags that can be used to group and search for users.
Allows you to delete the access specified in the ServerID
and ExternalID
parameters.
Delete the agreement that's specified in the provided AgreementId
.
Deletes the certificate that's specified in the CertificateId
parameter.
Deletes the agreement that's specified in the provided ConnectorId
.
Deletes the connector that's specified in the provided ConnectorId
.
Deletes the host key that's specified in the HostKeyId
parameter.
Deletes the profile that's specified in the ProfileId
parameter.
Deletes the file transfer protocol-enabled server that you specify.
No response returns from this operation.
", @@ -46,10 +46,11 @@ "ListUsers": "Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId
parameter.
Lists all workflows associated with your Amazon Web Services account for your current region.
", "SendWorkflowStepState": "Sends a callback for asynchronous custom steps.
The ExecutionId
, WorkflowId
, and Token
are passed to the target resource during execution of a custom step of a workflow. You must include those with their callback as well as providing a status.
Begins an outbound file transfer to a remote AS2 server. You specify the ConnectorId
and the file paths for where to send the files.
Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP server.
For an AS2 connector, you specify the ConnectorId
and one or more SendFilePaths
to identify the files you want to transfer.
For an SFTP connector, the file transfer can be either outbound or inbound. In both cases, you specify the ConnectorId
. Depending on the direction of the transfer, you also specify the following items:
If you are transferring files from a partner's SFTP server to a Transfer Family server, you specify one or more RetrieveFilePaths
to identify the files you want to transfer, and a LocalDirectoryPath
to specify the destination folder.
If you are transferring files to a partner's SFTP server from Amazon Web Services storage, you specify one or more SendFilePaths
to identify the files you want to transfer, and a RemoteDirectoryPath
to specify the destination folder.
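A minimal aws-sdk-go sketch of the inbound case described above; the connector ID, remote path, and destination are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := transfer.New(sess)

	// Inbound SFTP transfer: retrieve a file from the partner's server into AWS storage.
	// For the outbound case, set SendFilePaths and RemoteDirectoryPath instead.
	out, err := svc.StartFileTransfer(&transfer.StartFileTransferInput{
		ConnectorId:        aws.String("c-1234567890abcdef0"),               // hypothetical connector ID
		RetrieveFilePaths:  aws.StringSlice([]string{"/outbox/report.csv"}), // path on the partner's server
		LocalDirectoryPath: aws.String("/DOC-EXAMPLE-BUCKET/incoming"),      // destination folder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.TransferId))
}
```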
Changes the state of a file transfer protocol-enabled server from OFFLINE
to ONLINE
. It has no impact on a server that is already ONLINE
. An ONLINE
server can accept and process file transfer jobs.
The state of STARTING
indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED
can indicate an error condition.
No response is returned from this call.
", "StopServer": "Changes the state of a file transfer protocol-enabled server from ONLINE
to OFFLINE
. An OFFLINE
server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server.
Stopping the server does not reduce or impact your file transfer protocol endpoint billing; you must delete the server to stop being billed.
The state of STOPPING
indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED
can indicate an error condition.
No response is returned from this call.
", "TagResource": "Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
There is no response returned from this call.
", + "TestConnection": "Tests whether your SFTP connector is set up successfully. We highly recommend that you call this operation to test your ability to transfer files between a Transfer Family server and a trading partner's SFTP server.
", "TestIdentityProvider": "If the IdentityProviderType
of a file transfer protocol-enabled server is AWS_DIRECTORY_SERVICE
or API_Gateway
, tests whether your identity provider is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the identity provider integration to ensure that your users can successfully use the service.
The ServerId
and UserName
parameters are required. The ServerProtocol
, SourceIp
, and UserPassword
are all optional.
Note the following:
You cannot use TestIdentityProvider
if the IdentityProviderType
of your server is SERVICE_MANAGED
.
TestIdentityProvider
does not work with keys: it only accepts passwords.
TestIdentityProvider
can test the password operation for a custom Identity Provider that handles keys and passwords.
If you provide any incorrect values for any parameters, the Response
field is empty.
If you provide a server ID for a server that uses service-managed users, you get an error:
An error occurred (InvalidRequestException) when calling the TestIdentityProvider operation: s-server-ID not configured for external auth
If you enter a Server ID for the --server-id
parameter that does not identify an actual Transfer server, you receive the following error:
An error occurred (ResourceNotFoundException) when calling the TestIdentityProvider operation: Unknown server
.
It is possible your sever is in a different region. You can specify a region by adding the following: --region region-code
, such as --region us-east-2
to specify a server in US East (Ohio).
Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
No response is returned from this call.
", "UpdateAccess": "Allows you to update parameters for the access specified in the ServerID
and ExternalID
parameters.
Contains the details for a connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.
", + "base": "Contains the details for an AS2 connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.
", "refs": { - "CreateConnectorRequest$As2Config": "A structure that contains the parameters for a connector object.
", - "DescribedConnector$As2Config": "A structure that contains the parameters for a connector object.
", - "UpdateConnectorRequest$As2Config": "A structure that contains the parameters for a connector object.
" + "CreateConnectorRequest$As2Config": "A structure that contains the parameters for an AS2 connector object.
", + "DescribedConnector$As2Config": "A structure that contains the parameters for an AS2 connector object.
", + "UpdateConnectorRequest$As2Config": "A structure that contains the parameters for an AS2 connector object.
" } }, "As2ConnectorSecretId": { @@ -271,7 +272,9 @@ "DescribeConnectorRequest$ConnectorId": "The unique identifier for the connector.
", "DescribedConnector$ConnectorId": "The unique identifier for the connector.
", "ListedConnector$ConnectorId": "The unique identifier for the connector.
", - "StartFileTransferRequest$ConnectorId": "The unique identifier for the connector.
", + "StartFileTransferRequest$ConnectorId": "The unique identifier for the connector.
", + "TestConnectionRequest$ConnectorId": "The unique identifier for the connector.
", + "TestConnectionResponse$ConnectorId": "Returns the identifier of the connector object that you are testing.
", "UpdateConnectorRequest$ConnectorId": "The unique identifier for the connector.
", "UpdateConnectorResponse$ConnectorId": "Returns the identifier of the connector object that you are updating.
" } @@ -773,13 +776,16 @@ "FilePath": { "base": null, "refs": { - "FilePaths$member": null + "FilePaths$member": null, + "StartFileTransferRequest$LocalDirectoryPath": "For an inbound transfer, the LocaDirectoryPath
specifies the destination for one or more files that are transferred from the partner's SFTP server.
For an outbound transfer, the RemoteDirectoryPath
specifies the destination for one or more files that are transferred to the partner's SFTP server. If you don't specify a RemoteDirectoryPath
, the destination for transferred files is the SFTP user's home directory.
An array of strings. Each string represents the absolute path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt
.
One or more source paths for the Transfer Family server. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt
.
One or more source paths for the partner's SFTP server. Each string represents a source file path for one inbound file transfer.
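A sketch of starting an outbound transfer over an SFTP connector, again reusing the client from the first example; the SendFilePaths, RetrieveFilePaths, and LocalDirectoryPath member names are assumptions matched to the source-path descriptions above, and the bucket and connector ID are placeholders.

// Outbound: push a file from Amazon S3 to the partner's SFTP server.
out, err := svc.StartFileTransfer(&transfer.StartFileTransferInput{
	ConnectorId:   aws.String("c-1234567890abcdef0"),
	SendFilePaths: []*string{aws.String("/DOC-EXAMPLE-BUCKET/myfile.txt")},
	// RemoteDirectoryPath is optional; the SFTP user's home directory is
	// used when it is omitted.
})
if err != nil {
	fmt.Println("StartFileTransfer failed:", err)
	return
}
fmt.Println("transfer started:", aws.StringValue(out.TransferId))

// Inbound transfers instead use RetrieveFilePaths for the partner-side source
// files and LocalDirectoryPath for the Amazon S3 destination.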
" } }, "Fips": { @@ -820,7 +826,7 @@ "base": null, "refs": { "CreateAccessRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target
. This value can be set only when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory
parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target
. This value can be set only when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target
. This value can be set only when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the value the user should see for their home directory when they log in.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target
. This value can be set only when HomeDirectoryType
is set to LOGICAL.
In most cases, you can use this value instead of the session policy to lock down the associated access to the designated home directory (\"chroot
\"). To do this, you can set Entry
to '/' and set Target
to the HomeDirectory
parameter value.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target
. This value can be set only when HomeDirectoryType
is set to LOGICAL.
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot
\"). To do this, you can set Entry
to '/' and set Target
to the HomeDirectory parameter value.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target
. This value can be set only when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory
parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Returns Connection succeeded
if the test is successful. Or, returns a descriptive error message if the test fails. The following list provides the details for some error messages and troubleshooting steps for each.
Unable to access secrets manager: Verify that your secret name aligns with the one in Transfer Role permissions.
Unknown Host/Connection failed: Verify the server URL in the connector configuration, and verify that the login credentials work successfully outside of the connector.
Private key not found: Verify that the secret exists and is formatted correctly.
Invalid trusted host keys: Verify that the trusted host key in the connector configuration matches the ssh-keyscan
output.
A message that indicates whether the test was successful or not.
If an empty string is returned, the most likely cause is that the authentication failed due to an incorrect username or password.
The secondary POSIX group IDs used for all EFS operations by this user.
" } }, + "SecretId": { + "base": null, + "refs": { + "SftpConnectorConfig$UserSecretId": "The identifiers for the secrets (in Amazon Web Services Secrets Manager) that contain the SFTP user's private keys or passwords.
" + } + }, "SecurityGroupId": { "base": null, "refs": { @@ -1706,6 +1719,26 @@ "IdentityProviderDetails$SftpAuthenticationMethods": "For SFTP-enabled servers, and for custom identity providers only, you can specify whether to authenticate using a password, SSH key pair, or both.
PASSWORD
- users must provide their password to connect.
PUBLIC_KEY
- users must provide their private key to connect.
PUBLIC_KEY_OR_PASSWORD
- users can authenticate with either their password or their key. This is the default value.
PUBLIC_KEY_AND_PASSWORD
- users must provide both their private key and their password to connect. The server checks the key first, and then if the key is valid, the system prompts for a password. If the private key provided does not match the public key that is stored, authentication fails.
Contains the details for an SFTP connector object. The connector object is used for transferring files to and from a partner's SFTP server.
", + "refs": { + "CreateConnectorRequest$SftpConfig": "A structure that contains the parameters for an SFTP connector object.
", + "DescribedConnector$SftpConfig": "A structure that contains the parameters for an SFTP connector object.
", + "UpdateConnectorRequest$SftpConfig": "A structure that contains the parameters for an SFTP connector object.
" + } + }, + "SftpConnectorTrustedHostKey": { + "base": null, + "refs": { + "SftpConnectorTrustedHostKeyList$member": null + } + }, + "SftpConnectorTrustedHostKeyList": { + "base": null, + "refs": { + "SftpConnectorConfig$TrustedHostKeys": "The public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting. You can use the ssh-keyscan
command against the SFTP server to retrieve the necessary key.
The three standard SSH public key format elements are <key type>
, <body base64>
, and an optional <comment>
, with spaces between each element.
For the trusted host key, Transfer Family accepts RSA and ECDSA keys.
For RSA keys, the key type is ssh-rsa
.
For ECDSA keys, the key type is either ecdsa-sha2-nistp256
, ecdsa-sha2-nistp384
, or ecdsa-sha2-nistp521
, depending on the size of the key you generated.
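Putting the pieces above together, a hedged sketch of creating an SFTP connector with this SDK: the URL, access role, secret ARN, and trusted host key are placeholders, and the AccessRole member is assumed from the existing connector API rather than from the text above.

// The trusted host key is the ssh-keyscan output for the partner's server;
// the value below is truncated.
out, err := svc.CreateConnector(&transfer.CreateConnectorInput{
	Url:        aws.String("sftp://partner.example.com"),
	AccessRole: aws.String("arn:aws:iam::111122223333:role/transfer-connector"), // placeholder
	SftpConfig: &transfer.SftpConnectorConfig{
		UserSecretId:    aws.String("arn:aws:secretsmanager:us-east-2:111122223333:secret:sftp-user-creds"), // placeholder
		TrustedHostKeys: []*string{aws.String("ssh-rsa AAAAB3NzaC1yc2E...")},
	},
})
if err != nil {
	fmt.Println("CreateConnector failed:", err)
	return
}
fmt.Println("connector:", aws.StringValue(out.ConnectorId))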
The condition of the server that was described. A value of ONLINE
indicates that the server can accept jobs and transfer files. A State
value of OFFLINE
means that the server cannot perform file transfer operations.
The states of STARTING
and STOPPING
indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED
or STOP_FAILED
can indicate an error condition.
Returns OK
for a successful test, or ERROR
if the test fails.
Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (servers, users, workflows, and so on) for any purpose.
" } }, + "TestConnectionRequest": { + "base": null, + "refs": { + } + }, + "TestConnectionResponse": { + "base": null, + "refs": { + } + }, "TestIdentityProviderRequest": { "base": null, "refs": { @@ -1904,7 +1953,7 @@ "TransferId": { "base": null, "refs": { - "StartFileTransferResponse$TransferId": "Returns the unique identifier for this file transfer.
" + "StartFileTransferResponse$TransferId": "Returns the unique identifier for the file transfer.
" } }, "UntagResourceRequest": { @@ -1995,12 +2044,12 @@ "Url": { "base": null, "refs": { - "CreateConnectorRequest$Url": "The URL of the partner's AS2 endpoint.
", - "DescribedConnector$Url": "The URL of the partner's AS2 endpoint.
", + "CreateConnectorRequest$Url": "The URL of the partner's AS2 or SFTP endpoint.
", + "DescribedConnector$Url": "The URL of the partner's AS2 or SFTP endpoint.
", "IdentityProviderDetails$Url": "Provides the location of the service endpoint used to authenticate users.
", - "ListedConnector$Url": "The URL of the partner's AS2 endpoint.
", + "ListedConnector$Url": "The URL of the partner's AS2 or SFTP endpoint.
", "TestIdentityProviderResponse$Url": "The endpoint of the service used to authenticate a user.
", - "UpdateConnectorRequest$Url": "The URL of the partner's AS2 endpoint.
" + "UpdateConnectorRequest$Url": "The URL of the partner's AS2 or SFTP endpoint.
" } }, "UserCount": { diff --git a/models/apis/wisdom/2020-10-19/api-2.json b/models/apis/wisdom/2020-10-19/api-2.json index bd5d1eb98cc..af4a2d03a32 100644 --- a/models/apis/wisdom/2020-10-19/api-2.json +++ b/models/apis/wisdom/2020-10-19/api-2.json @@ -586,6 +586,7 @@ "assistantArn":{"shape":"Arn"}, "assistantId":{"shape":"Uuid"}, "description":{"shape":"Description"}, + "integrationConfiguration":{"shape":"AssistantIntegrationConfiguration"}, "name":{"shape":"Name"}, "serverSideEncryptionConfiguration":{"shape":"ServerSideEncryptionConfiguration"}, "status":{"shape":"AssistantStatus"}, @@ -593,6 +594,12 @@ "type":{"shape":"AssistantType"} } }, + "AssistantIntegrationConfiguration":{ + "type":"structure", + "members":{ + "topicIntegrationArn":{"shape":"GenericArn"} + } + }, "AssistantList":{ "type":"list", "member":{"shape":"AssistantSummary"} @@ -621,6 +628,7 @@ "assistantArn":{"shape":"Arn"}, "assistantId":{"shape":"Uuid"}, "description":{"shape":"Description"}, + "integrationConfiguration":{"shape":"AssistantIntegrationConfiguration"}, "name":{"shape":"Name"}, "serverSideEncryptionConfiguration":{"shape":"ServerSideEncryptionConfiguration"}, "status":{"shape":"AssistantStatus"}, @@ -1755,12 +1763,19 @@ ], "members":{ "description":{"shape":"Description"}, + "integrationConfiguration":{"shape":"SessionIntegrationConfiguration"}, "name":{"shape":"Name"}, "sessionArn":{"shape":"Arn"}, "sessionId":{"shape":"Uuid"}, "tags":{"shape":"Tags"} } }, + "SessionIntegrationConfiguration":{ + "type":"structure", + "members":{ + "topicIntegrationArn":{"shape":"GenericArn"} + } + }, "SessionSummaries":{ "type":"list", "member":{"shape":"SessionSummary"} diff --git a/models/apis/wisdom/2020-10-19/docs-2.json b/models/apis/wisdom/2020-10-19/docs-2.json index d0ac3769ab6..4e0929370a1 100644 --- a/models/apis/wisdom/2020-10-19/docs-2.json +++ b/models/apis/wisdom/2020-10-19/docs-2.json @@ -111,6 +111,13 @@ "GetAssistantResponse$assistant": "Information about the assistant.
" } }, + "AssistantIntegrationConfiguration": { + "base": "The configuration information for the Wisdom assistant integration.
", + "refs": { + "AssistantData$integrationConfiguration": "The configuration information for the Wisdom assistant integration.
", + "AssistantSummary$integrationConfiguration": "The configuration information for the Wisdom assistant integration.
" + } + }, "AssistantList": { "base": null, "refs": { @@ -370,7 +377,9 @@ "GenericArn": { "base": null, "refs": { - "AppIntegrationsConfiguration$appIntegrationArn": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.
For Salesforce, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least Id
, ArticleNumber
, VersionNumber
, Title
, PublishStatus
, and IsDeleted
as source fields.
For ServiceNow, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least number
, short_description
, sys_mod_count
, workflow_state
, and active
as source fields.
For Zendesk, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields
is not provided, including at least id
, title
, updated_at
, and draft
as source fields.
For SharePoint, your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among docx
, pdf
, html
, htm
, and txt
.
The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.
For Salesforce, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least Id
, ArticleNumber
, VersionNumber
, Title
, PublishStatus
, and IsDeleted
as source fields.
For ServiceNow, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least number
, short_description
, sys_mod_count
, workflow_state
, and active
as source fields.
For Zendesk, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields
is not provided, including at least id
, title
, updated_at
, and draft
as source fields.
For SharePoint, your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among docx
, pdf
, html
, htm
, and txt
.
The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.
", + "SessionIntegrationConfiguration$topicIntegrationArn": "The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.
" } }, "GetAssistantAssociationRequest": { @@ -846,6 +855,12 @@ "GetSessionResponse$session": "The session.
" } }, + "SessionIntegrationConfiguration": { + "base": "The configuration information for the session integration.
", + "refs": { + "SessionData$integrationConfiguration": "The configuration information for the session integration.
" + } + }, "SessionSummaries": { "base": null, "refs": { diff --git a/models/apis/wisdom/2020-10-19/endpoint-tests-1.json b/models/apis/wisdom/2020-10-19/endpoint-tests-1.json index 992d9927332..d3307f81d16 100644 --- a/models/apis/wisdom/2020-10-19/endpoint-tests-1.json +++ b/models/apis/wisdom/2020-10-19/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -216,9 +216,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -229,9 +240,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + 
"documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -242,9 +264,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -255,9 +288,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -268,9 +312,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -281,9 +325,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -295,8 +339,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -306,9 +350,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -318,11 +362,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/service/billingconductor/api.go b/service/billingconductor/api.go index 68c363be474..7051374a9eb 100644 --- a/service/billingconductor/api.go +++ b/service/billingconductor/api.go @@ -80,8 +80,7 @@ func (c *BillingConductor) AssociateAccountsRequest(input *AssociateAccountsInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. @@ -179,8 +178,7 @@ func (c *BillingConductor) AssociatePricingRulesRequest(input *AssociatePricingR // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. 
@@ -277,8 +275,7 @@ func (c *BillingConductor) BatchAssociateResourcesToCustomLineItemRequest(input // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. @@ -375,8 +372,7 @@ func (c *BillingConductor) BatchDisassociateResourcesFromCustomLineItemRequest(i // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -471,8 +467,7 @@ func (c *BillingConductor) CreateBillingGroupRequest(input *CreateBillingGroupIn // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. @@ -568,8 +563,7 @@ func (c *BillingConductor) CreateCustomLineItemRequest(input *CreateCustomLineIt // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. @@ -664,8 +658,7 @@ func (c *BillingConductor) CreatePricingPlanRequest(input *CreatePricingPlanInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. @@ -763,8 +756,7 @@ func (c *BillingConductor) CreatePricingRuleRequest(input *CreatePricingRuleInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - ServiceLimitExceededException // The request would cause a service limit to exceed. @@ -855,8 +847,7 @@ func (c *BillingConductor) DeleteBillingGroupRequest(input *DeleteBillingGroupIn // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -948,8 +939,7 @@ func (c *BillingConductor) DeleteCustomLineItemRequest(input *DeleteCustomLineIt // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. 
+// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1041,8 +1031,7 @@ func (c *BillingConductor) DeletePricingPlanRequest(input *DeletePricingPlanInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1134,8 +1123,7 @@ func (c *BillingConductor) DeletePricingRuleRequest(input *DeletePricingRuleInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1226,8 +1214,7 @@ func (c *BillingConductor) DisassociateAccountsRequest(input *DisassociateAccoun // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1321,8 +1308,7 @@ func (c *BillingConductor) DisassociatePricingRulesRequest(input *DisassociatePr // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1422,8 +1408,7 @@ func (c *BillingConductor) ListAccountAssociationsRequest(input *ListAccountAsso // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1573,8 +1558,7 @@ func (c *BillingConductor) ListBillingGroupCostReportsRequest(input *ListBilling // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -1724,8 +1708,7 @@ func (c *BillingConductor) ListBillingGroupsRequest(input *ListBillingGroupsInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. 
@@ -1873,8 +1856,7 @@ func (c *BillingConductor) ListCustomLineItemVersionsRequest(input *ListCustomLi // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2021,8 +2003,7 @@ func (c *BillingConductor) ListCustomLineItemsRequest(input *ListCustomLineItems // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2171,8 +2152,7 @@ func (c *BillingConductor) ListPricingPlansRequest(input *ListPricingPlansInput) // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2317,8 +2297,7 @@ func (c *BillingConductor) ListPricingPlansAssociatedWithPricingRuleRequest(inpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2467,8 +2446,7 @@ func (c *BillingConductor) ListPricingRulesRequest(input *ListPricingRulesInput) // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2613,8 +2591,7 @@ func (c *BillingConductor) ListPricingRulesAssociatedToPricingPlanRequest(input // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2762,8 +2739,7 @@ func (c *BillingConductor) ListResourcesAssociatedToCustomLineItemRequest(input // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -2905,8 +2881,7 @@ func (c *BillingConductor) ListTagsForResourceRequest(input *ListTagsForResource // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. 
+// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3000,8 +2975,7 @@ func (c *BillingConductor) TagResourceRequest(input *TagResourceInput) (req *req // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3093,8 +3067,7 @@ func (c *BillingConductor) UntagResourceRequest(input *UntagResourceInput) (req // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3188,8 +3161,7 @@ func (c *BillingConductor) UpdateBillingGroupRequest(input *UpdateBillingGroupIn // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3280,8 +3252,7 @@ func (c *BillingConductor) UpdateCustomLineItemRequest(input *UpdateCustomLineIt // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3375,8 +3346,7 @@ func (c *BillingConductor) UpdatePricingPlanRequest(input *UpdatePricingPlanInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3470,8 +3440,7 @@ func (c *BillingConductor) UpdatePricingRuleRequest(input *UpdatePricingRuleInpu // You do not have sufficient access to perform this action. // // - ValidationException -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. // // - InternalServerException // An unexpected error occurred while processing a request. @@ -3633,10 +3602,14 @@ func (s *AccountAssociationsListElement) SetBillingGroupArn(v string) *AccountAs } // The set of accounts that will be under the billing group. The set of accounts -// resemble the linked accounts in a consolidated family. +// resemble the linked accounts in a consolidated billing family. type AccountGrouping struct { _ struct{} `type:"structure"` + // Specifies if this billing group will automatically associate newly added + // Amazon Web Services accounts that join your consolidated billing family. 
+ AutoAssociate *bool `type:"boolean"` + // The account IDs that make up the billing group. Account IDs must be a part // of the consolidated billing family, and not associated with another billing // group. @@ -3679,6 +3652,12 @@ func (s *AccountGrouping) Validate() error { return nil } +// SetAutoAssociate sets the AutoAssociate field's value. +func (s *AccountGrouping) SetAutoAssociate(v bool) *AccountGrouping { + s.AutoAssociate = &v + return s +} + // SetLinkedAccountIds sets the LinkedAccountIds field's value. func (s *AccountGrouping) SetLinkedAccountIds(v []*string) *AccountGrouping { s.LinkedAccountIds = v @@ -4279,6 +4258,10 @@ func (s *BillingGroupCostReportElement) SetProformaCost(v string) *BillingGroupC type BillingGroupListElement struct { _ struct{} `type:"structure"` + // Specifies if the billing group has automatic account association (AutoAssociate) + // enabled. + AccountGrouping *ListBillingGroupAccountGrouping `type:"structure"` + // The Amazon Resource Number (ARN) that can be used to uniquely identify the // billing group. Arn *string `type:"string"` @@ -4338,6 +4321,12 @@ func (s BillingGroupListElement) GoString() string { return s.String() } +// SetAccountGrouping sets the AccountGrouping field's value. +func (s *BillingGroupListElement) SetAccountGrouping(v *ListBillingGroupAccountGrouping) *BillingGroupListElement { + s.AccountGrouping = v + return s +} + // SetArn sets the Arn field's value. func (s *BillingGroupListElement) SetArn(v string) *BillingGroupListElement { s.Arn = &v @@ -4528,7 +4517,7 @@ type CreateBillingGroupInput struct { _ struct{} `type:"structure"` // The set of accounts that will be under the billing group. The set of accounts - // resemble the linked accounts in a consolidated family. + // resemble the linked accounts in a consolidated billing family. // // AccountGrouping is a required field AccountGrouping *AccountGrouping `type:"structure" required:"true"` @@ -6697,6 +6686,39 @@ func (s *ListAccountAssociationsOutput) SetNextToken(v string) *ListAccountAssoc return s } +// Specifies if the billing group has the following features enabled. +type ListBillingGroupAccountGrouping struct { + _ struct{} `type:"structure"` + + // Specifies if this billing group will automatically associate newly added + // Amazon Web Services accounts that join your consolidated billing family. + AutoAssociate *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBillingGroupAccountGrouping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBillingGroupAccountGrouping) GoString() string { + return s.String() +} + +// SetAutoAssociate sets the AutoAssociate field's value. +func (s *ListBillingGroupAccountGrouping) SetAutoAssociate(v bool) *ListBillingGroupAccountGrouping { + s.AutoAssociate = &v + return s +} + // The filter used to retrieve specific BillingGroupCostReportElements. 
type ListBillingGroupCostReportsFilter struct { _ struct{} `type:"structure"` @@ -6868,6 +6890,10 @@ type ListBillingGroupsFilter struct { // The list of billing group Amazon Resource Names (ARNs) to retrieve information. Arns []*string `min:"1" type:"list"` + // Specifies if this billing group will automatically associate newly added + // Amazon Web Services accounts that join your consolidated billing family. + AutoAssociate *bool `type:"boolean"` + // The pricing plan Amazon Resource Names (ARNs) to retrieve information. PricingPlan *string `type:"string"` @@ -6916,6 +6942,12 @@ func (s *ListBillingGroupsFilter) SetArns(v []*string) *ListBillingGroupsFilter return s } +// SetAutoAssociate sets the AutoAssociate field's value. +func (s *ListBillingGroupsFilter) SetAutoAssociate(v bool) *ListBillingGroupsFilter { + s.AutoAssociate = &v + return s +} + // SetPricingPlan sets the PricingPlan field's value. func (s *ListBillingGroupsFilter) SetPricingPlan(v string) *ListBillingGroupsFilter { s.PricingPlan = &v @@ -9161,9 +9193,46 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +// Specifies if the billing group has the following features enabled. +type UpdateBillingGroupAccountGrouping struct { + _ struct{} `type:"structure"` + + // Specifies if this billing group will automatically associate newly added + // Amazon Web Services accounts that join your consolidated billing family. + AutoAssociate *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateBillingGroupAccountGrouping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateBillingGroupAccountGrouping) GoString() string { + return s.String() +} + +// SetAutoAssociate sets the AutoAssociate field's value. +func (s *UpdateBillingGroupAccountGrouping) SetAutoAssociate(v bool) *UpdateBillingGroupAccountGrouping { + s.AutoAssociate = &v + return s +} + type UpdateBillingGroupInput struct { _ struct{} `type:"structure"` + // Specifies if the billing group has automatic account association (AutoAssociate) + // enabled. + AccountGrouping *UpdateBillingGroupAccountGrouping `type:"structure"` + // The Amazon Resource Name (ARN) of the billing group being updated. // // Arn is a required field @@ -9230,6 +9299,12 @@ func (s *UpdateBillingGroupInput) Validate() error { return nil } +// SetAccountGrouping sets the AccountGrouping field's value. +func (s *UpdateBillingGroupInput) SetAccountGrouping(v *UpdateBillingGroupAccountGrouping) *UpdateBillingGroupInput { + s.AccountGrouping = v + return s +} + // SetArn sets the Arn field's value. func (s *UpdateBillingGroupInput) SetArn(v string) *UpdateBillingGroupInput { s.Arn = &v @@ -9263,6 +9338,10 @@ func (s *UpdateBillingGroupInput) SetStatus(v string) *UpdateBillingGroupInput { type UpdateBillingGroupOutput struct { _ struct{} `type:"structure"` + // Specifies if the billing group has automatic account association (AutoAssociate) + // enabled. 
+ AccountGrouping *UpdateBillingGroupAccountGrouping `type:"structure"` + // The Amazon Resource Name (ARN) of the billing group that was updated. Arn *string `type:"string"` @@ -9318,6 +9397,12 @@ func (s UpdateBillingGroupOutput) GoString() string { return s.String() } +// SetAccountGrouping sets the AccountGrouping field's value. +func (s *UpdateBillingGroupOutput) SetAccountGrouping(v *UpdateBillingGroupAccountGrouping) *UpdateBillingGroupOutput { + s.AccountGrouping = v + return s +} + // SetArn sets the Arn field's value. func (s *UpdateBillingGroupOutput) SetArn(v string) *UpdateBillingGroupOutput { s.Arn = &v @@ -10254,8 +10339,7 @@ func (s *UpdateTieringInput_) SetFreeTier(v *UpdateFreeTierConfig) *UpdateTierin return s } -// The input doesn't match with the constraints specified by Amazon Web Services -// services. +// The input doesn't match with the constraints specified by Amazon Web Services. type ValidationException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -10705,6 +10789,12 @@ const ( // ValidationExceptionReasonInvalidFilter is a ValidationExceptionReason enum value ValidationExceptionReasonInvalidFilter = "INVALID_FILTER" + + // ValidationExceptionReasonTooManyAutoAssociateBillingGroups is a ValidationExceptionReason enum value + ValidationExceptionReasonTooManyAutoAssociateBillingGroups = "TOO_MANY_AUTO_ASSOCIATE_BILLING_GROUPS" + + // ValidationExceptionReasonCannotDeleteAutoAssociateBillingGroup is a ValidationExceptionReason enum value + ValidationExceptionReasonCannotDeleteAutoAssociateBillingGroup = "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP" ) // ValidationExceptionReason_Values returns all elements of the ValidationExceptionReason enum @@ -10767,5 +10857,7 @@ func ValidationExceptionReason_Values() []string { ValidationExceptionReasonIllegalUsageType, ValidationExceptionReasonInvalidSkuCombo, ValidationExceptionReasonInvalidFilter, + ValidationExceptionReasonTooManyAutoAssociateBillingGroups, + ValidationExceptionReasonCannotDeleteAutoAssociateBillingGroup, } } diff --git a/service/billingconductor/errors.go b/service/billingconductor/errors.go index cd20130f34f..e113d4c2f64 100644 --- a/service/billingconductor/errors.go +++ b/service/billingconductor/errors.go @@ -47,8 +47,7 @@ const ( // ErrCodeValidationException for service response error code // "ValidationException". // - // The input doesn't match with the constraints specified by Amazon Web Services - // services. + // The input doesn't match with the constraints specified by Amazon Web Services. ErrCodeValidationException = "ValidationException" ) diff --git a/service/connectwisdomservice/api.go b/service/connectwisdomservice/api.go index 080237c6488..6cd3a936244 100644 --- a/service/connectwisdomservice/api.go +++ b/service/connectwisdomservice/api.go @@ -3574,6 +3574,9 @@ type AssistantData struct { // The description. Description *string `locationName:"description" min:"1" type:"string"` + // The configuration information for the Wisdom assistant integration. + IntegrationConfiguration *AssistantIntegrationConfiguration `locationName:"integrationConfiguration" type:"structure"` + // The name. // // Name is a required field @@ -3632,6 +3635,12 @@ func (s *AssistantData) SetDescription(v string) *AssistantData { return s } +// SetIntegrationConfiguration sets the IntegrationConfiguration field's value. 
+func (s *AssistantData) SetIntegrationConfiguration(v *AssistantIntegrationConfiguration) *AssistantData { + s.IntegrationConfiguration = v + return s +} + // SetName sets the Name field's value. func (s *AssistantData) SetName(v string) *AssistantData { s.Name = &v @@ -3662,6 +3671,39 @@ func (s *AssistantData) SetType(v string) *AssistantData { return s } +// The configuration information for the Wisdom assistant integration. +type AssistantIntegrationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for + // streaming chat messages. + TopicIntegrationArn *string `locationName:"topicIntegrationArn" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssistantIntegrationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssistantIntegrationConfiguration) GoString() string { + return s.String() +} + +// SetTopicIntegrationArn sets the TopicIntegrationArn field's value. +func (s *AssistantIntegrationConfiguration) SetTopicIntegrationArn(v string) *AssistantIntegrationConfiguration { + s.TopicIntegrationArn = &v + return s +} + // Summary information about the assistant. type AssistantSummary struct { _ struct{} `type:"structure"` @@ -3679,6 +3721,9 @@ type AssistantSummary struct { // The description of the assistant. Description *string `locationName:"description" min:"1" type:"string"` + // The configuration information for the Wisdom assistant integration. + IntegrationConfiguration *AssistantIntegrationConfiguration `locationName:"integrationConfiguration" type:"structure"` + // The name of the assistant. // // Name is a required field @@ -3737,6 +3782,12 @@ func (s *AssistantSummary) SetDescription(v string) *AssistantSummary { return s } +// SetIntegrationConfiguration sets the IntegrationConfiguration field's value. +func (s *AssistantSummary) SetIntegrationConfiguration(v *AssistantIntegrationConfiguration) *AssistantSummary { + s.IntegrationConfiguration = v + return s +} + // SetName sets the Name field's value. func (s *AssistantSummary) SetName(v string) *AssistantSummary { s.Name = &v @@ -8315,6 +8366,9 @@ type SessionData struct { // The description of the session. Description *string `locationName:"description" min:"1" type:"string"` + // The configuration information for the session integration. + IntegrationConfiguration *SessionIntegrationConfiguration `locationName:"integrationConfiguration" type:"structure"` + // The name of the session. // // Name is a required field @@ -8358,6 +8412,12 @@ func (s *SessionData) SetDescription(v string) *SessionData { return s } +// SetIntegrationConfiguration sets the IntegrationConfiguration field's value. +func (s *SessionData) SetIntegrationConfiguration(v *SessionIntegrationConfiguration) *SessionData { + s.IntegrationConfiguration = v + return s +} + // SetName sets the Name field's value. 
func (s *SessionData) SetName(v string) *SessionData { s.Name = &v @@ -8382,6 +8442,39 @@ func (s *SessionData) SetTags(v map[string]*string) *SessionData { return s } +// The configuration information for the session integration. +type SessionIntegrationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for + // streaming chat messages. + TopicIntegrationArn *string `locationName:"topicIntegrationArn" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionIntegrationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionIntegrationConfiguration) GoString() string { + return s.String() +} + +// SetTopicIntegrationArn sets the TopicIntegrationArn field's value. +func (s *SessionIntegrationConfiguration) SetTopicIntegrationArn(v string) *SessionIntegrationConfiguration { + s.TopicIntegrationArn = &v + return s +} + // Summary information about the session. type SessionSummary struct { _ struct{} `type:"structure"` diff --git a/service/customerprofiles/api.go b/service/customerprofiles/api.go index 5cf800b09bc..58a14d48d34 100644 --- a/service/customerprofiles/api.go +++ b/service/customerprofiles/api.go @@ -2396,6 +2396,99 @@ func (c *CustomerProfiles) GetProfileObjectTypeTemplateWithContext(ctx aws.Conte return out, req.Send() } +const opGetSimilarProfiles = "GetSimilarProfiles" + +// GetSimilarProfilesRequest generates a "aws/request.Request" representing the +// client's request for the GetSimilarProfiles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSimilarProfiles for more information on using the GetSimilarProfiles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetSimilarProfilesRequest method. +// req, resp := client.GetSimilarProfilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/customer-profiles-2020-08-15/GetSimilarProfiles +func (c *CustomerProfiles) GetSimilarProfilesRequest(input *GetSimilarProfilesInput) (req *request.Request, output *GetSimilarProfilesOutput) { + op := &request.Operation{ + Name: opGetSimilarProfiles, + HTTPMethod: "POST", + HTTPPath: "/domains/{DomainName}/matches", + } + + if input == nil { + input = &GetSimilarProfilesInput{} + } + + output = &GetSimilarProfilesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSimilarProfiles API operation for Amazon Connect Customer Profiles. 
+// +// Returns a set of profiles that belong to the same matching group using the +// matchId or profileId. You can also specify the type of matching that you +// want for finding similar profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Connect Customer Profiles's +// API operation GetSimilarProfiles for usage and error information. +// +// Returned Error Types: +// +// - BadRequestException +// The input you provided is invalid. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ResourceNotFoundException +// The requested resource does not exist, or access was denied. +// +// - ThrottlingException +// You exceeded the maximum number of requests. +// +// - InternalServerException +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/customer-profiles-2020-08-15/GetSimilarProfiles +func (c *CustomerProfiles) GetSimilarProfiles(input *GetSimilarProfilesInput) (*GetSimilarProfilesOutput, error) { + req, out := c.GetSimilarProfilesRequest(input) + return out, req.Send() +} + +// GetSimilarProfilesWithContext is the same as GetSimilarProfiles with the addition of +// the ability to pass a context and additional request options. +// +// See GetSimilarProfiles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CustomerProfiles) GetSimilarProfilesWithContext(ctx aws.Context, input *GetSimilarProfilesInput, opts ...request.Option) (*GetSimilarProfilesOutput, error) { + req, out := c.GetSimilarProfilesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetWorkflow = "GetWorkflow" // GetWorkflowRequest generates a "aws/request.Request" representing the @@ -3546,6 +3639,97 @@ func (c *CustomerProfiles) ListProfileObjectsWithContext(ctx aws.Context, input return out, req.Send() } +const opListRuleBasedMatches = "ListRuleBasedMatches" + +// ListRuleBasedMatchesRequest generates a "aws/request.Request" representing the +// client's request for the ListRuleBasedMatches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRuleBasedMatches for more information on using the ListRuleBasedMatches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListRuleBasedMatchesRequest method. 
+// req, resp := client.ListRuleBasedMatchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/customer-profiles-2020-08-15/ListRuleBasedMatches +func (c *CustomerProfiles) ListRuleBasedMatchesRequest(input *ListRuleBasedMatchesInput) (req *request.Request, output *ListRuleBasedMatchesOutput) { + op := &request.Operation{ + Name: opListRuleBasedMatches, + HTTPMethod: "GET", + HTTPPath: "/domains/{DomainName}/profiles/ruleBasedMatches", + } + + if input == nil { + input = &ListRuleBasedMatchesInput{} + } + + output = &ListRuleBasedMatchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRuleBasedMatches API operation for Amazon Connect Customer Profiles. +// +// Returns a set of MatchIds that belong to the given domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Connect Customer Profiles's +// API operation ListRuleBasedMatches for usage and error information. +// +// Returned Error Types: +// +// - BadRequestException +// The input you provided is invalid. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ResourceNotFoundException +// The requested resource does not exist, or access was denied. +// +// - ThrottlingException +// You exceeded the maximum number of requests. +// +// - InternalServerException +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/customer-profiles-2020-08-15/ListRuleBasedMatches +func (c *CustomerProfiles) ListRuleBasedMatches(input *ListRuleBasedMatchesInput) (*ListRuleBasedMatchesOutput, error) { + req, out := c.ListRuleBasedMatchesRequest(input) + return out, req.Send() +} + +// ListRuleBasedMatchesWithContext is the same as ListRuleBasedMatches with the addition of +// the ability to pass a context and additional request options. +// +// See ListRuleBasedMatches for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CustomerProfiles) ListRuleBasedMatchesWithContext(ctx aws.Context, input *ListRuleBasedMatchesInput, opts ...request.Option) (*ListRuleBasedMatchesOutput, error) { + req, out := c.ListRuleBasedMatchesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -5569,6 +5753,128 @@ func (s *AttributeItem) SetName(v string) *AttributeItem { return s } +// Configuration information about the AttributeTypesSelector where the rule-based +// identity resolution uses to match profiles. You can choose how profiles are +// compared across attribute types and which attribute to use for matching from +// each type. 
There are three attribute types you can configure: +// +// - Email type You can choose from Email, BusinessEmail, and PersonalEmail +// +// - Phone number type You can choose from Phone, HomePhone, and MobilePhone +// +// - Address type You can choose from Address, BusinessAddress, MaillingAddress, +// and ShippingAddress +// +// You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. +// When choosing MANY_TO_MANY, the system can match attribute across the sub-types +// of an attribute type. For example, if the value of the Email field of Profile +// A and the value of BusinessEmail field of Profile B matches, the two profiles +// are matched on the Email type. When choosing ONE_TO_ONE the system can only +// match if the sub-types are exact matches. For example, only when the value +// of the Email field of Profile A and the value of the Email field of Profile +// B matches, the two profiles are matched on the Email type. +type AttributeTypesSelector struct { + _ struct{} `type:"structure"` + + // The Address type. You can choose from Address, BusinessAddress, MaillingAddress, + // and ShippingAddress. + // + // You only can use the Address type in the MatchingRule. For example, if you + // want to match profile based on BusinessAddress.City or MaillingAddress.City, + // you need to choose the BusinessAddress and the MaillingAddress to represent + // the Address type and specify the Address.City on the matching rule. + Address []*string `min:"1" type:"list"` + + // Configures the AttributeMatchingModel, you can either choose ONE_TO_ONE or + // MANY_TO_MANY. + // + // AttributeMatchingModel is a required field + AttributeMatchingModel *string `type:"string" required:"true" enum:"AttributeMatchingModel"` + + // The Email type. You can choose from EmailAddress, BusinessEmailAddress and + // PersonalEmailAddress. + // + // You only can use the EmailAddress type in the MatchingRule. For example, + // if you want to match profile based on PersonalEmailAddress or BusinessEmailAddress, + // you need to choose the PersonalEmailAddress and the BusinessEmailAddress + // to represent the EmailAddress type and only specify the EmailAddress on the + // matching rule. + EmailAddress []*string `min:"1" type:"list"` + + // The PhoneNumber type. You can choose from PhoneNumber, HomePhoneNumber, and + // MobilePhoneNumber. + // + // You only can use the PhoneNumber type in the MatchingRule. For example, if + // you want to match a profile based on Phone or HomePhone, you need to choose + // the Phone and the HomePhone to represent the PhoneNumber type and only specify + // the PhoneNumber on the matching rule. + PhoneNumber []*string `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeTypesSelector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeTypesSelector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
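//
// As a point of reference, a selector that this validation would accept could
// be built roughly as follows; the attribute names are illustrative picks from
// the lists documented above, not a required configuration.
//
//	selector := &customerprofiles.AttributeTypesSelector{
//		AttributeMatchingModel: aws.String(customerprofiles.AttributeMatchingModelManyToMany),
//		EmailAddress:           []*string{aws.String("EmailAddress"), aws.String("PersonalEmailAddress")},
//		PhoneNumber:            []*string{aws.String("PhoneNumber"), aws.String("HomePhoneNumber")},
//	}
//	if err := selector.Validate(); err != nil {
//		fmt.Println(err) // e.g. a missing AttributeMatchingModel is reported here
//	}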
+func (s *AttributeTypesSelector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeTypesSelector"} + if s.Address != nil && len(s.Address) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Address", 1)) + } + if s.AttributeMatchingModel == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeMatchingModel")) + } + if s.EmailAddress != nil && len(s.EmailAddress) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmailAddress", 1)) + } + if s.PhoneNumber != nil && len(s.PhoneNumber) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PhoneNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddress sets the Address field's value. +func (s *AttributeTypesSelector) SetAddress(v []*string) *AttributeTypesSelector { + s.Address = v + return s +} + +// SetAttributeMatchingModel sets the AttributeMatchingModel field's value. +func (s *AttributeTypesSelector) SetAttributeMatchingModel(v string) *AttributeTypesSelector { + s.AttributeMatchingModel = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AttributeTypesSelector) SetEmailAddress(v []*string) *AttributeTypesSelector { + s.EmailAddress = v + return s +} + +// SetPhoneNumber sets the PhoneNumber field's value. +func (s *AttributeTypesSelector) SetPhoneNumber(v []*string) *AttributeTypesSelector { + s.PhoneNumber = v + return s +} + // Configuration settings for how to perform the auto-merging of profiles. type AutoMerging struct { _ struct{} `type:"structure"` @@ -6334,6 +6640,14 @@ type CreateDomainInput struct { // in the MatchingRequest, you can download the results from S3. Matching *MatchingRequest `type:"structure"` + // The process of matching duplicate profiles using the Rule-Based matching. + // If RuleBasedMatching = true, Amazon Connect Customer Profiles will start + // to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + // You can use the ListRuleBasedMatches and GetSimilarProfiles API to return + // and review the results. Also, if you have configured ExportingConfig in the + // RuleBasedMatchingRequest, you can download the results from S3. + RuleBasedMatching *RuleBasedMatchingRequest `type:"structure"` + // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` } @@ -6379,6 +6693,11 @@ func (s *CreateDomainInput) Validate() error { invalidParams.AddNested("Matching", err.(request.ErrInvalidParams)) } } + if s.RuleBasedMatching != nil { + if err := s.RuleBasedMatching.Validate(); err != nil { + invalidParams.AddNested("RuleBasedMatching", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6416,6 +6735,12 @@ func (s *CreateDomainInput) SetMatching(v *MatchingRequest) *CreateDomainInput { return s } +// SetRuleBasedMatching sets the RuleBasedMatching field's value. +func (s *CreateDomainInput) SetRuleBasedMatching(v *RuleBasedMatchingRequest) *CreateDomainInput { + s.RuleBasedMatching = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateDomainInput) SetTags(v map[string]*string) *CreateDomainInput { s.Tags = v @@ -6465,6 +6790,14 @@ type CreateDomainOutput struct { // in the MatchingRequest, you can download the results from S3. Matching *MatchingResponse `type:"structure"` + // The process of matching duplicate profiles using the Rule-Based matching. 
+ // If RuleBasedMatching = true, Amazon Connect Customer Profiles will start + // to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + // You can use the ListRuleBasedMatches and GetSimilarProfiles API to return + // and review the results. Also, if you have configured ExportingConfig in the + // RuleBasedMatchingRequest, you can download the results from S3. + RuleBasedMatching *RuleBasedMatchingResponse `type:"structure"` + // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` } @@ -6529,6 +6862,12 @@ func (s *CreateDomainOutput) SetMatching(v *MatchingResponse) *CreateDomainOutpu return s } +// SetRuleBasedMatching sets the RuleBasedMatching field's value. +func (s *CreateDomainOutput) SetRuleBasedMatching(v *RuleBasedMatchingResponse) *CreateDomainOutput { + s.RuleBasedMatching = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateDomainOutput) SetTags(v map[string]*string) *CreateDomainOutput { s.Tags = v @@ -9414,6 +9753,14 @@ type GetDomainOutput struct { // in the MatchingRequest, you can download the results from S3. Matching *MatchingResponse `type:"structure"` + // The process of matching duplicate profiles using the Rule-Based matching. + // If RuleBasedMatching = true, Amazon Connect Customer Profiles will start + // to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + // You can use the ListRuleBasedMatches and GetSimilarProfiles API to return + // and review the results. Also, if you have configured ExportingConfig in the + // RuleBasedMatchingRequest, you can download the results from S3. + RuleBasedMatching *RuleBasedMatchingResponse `type:"structure"` + // Usage-specific statistics about the domain. Stats *DomainStats `type:"structure"` @@ -9481,6 +9828,12 @@ func (s *GetDomainOutput) SetMatching(v *MatchingResponse) *GetDomainOutput { return s } +// SetRuleBasedMatching sets the RuleBasedMatching field's value. +func (s *GetDomainOutput) SetRuleBasedMatching(v *RuleBasedMatchingResponse) *GetDomainOutput { + s.RuleBasedMatching = v + return s +} + // SetStats sets the Stats field's value. func (s *GetDomainOutput) SetStats(v *DomainStats) *GetDomainOutput { s.Stats = v @@ -10522,18 +10875,34 @@ func (s *GetProfileObjectTypeTemplateOutput) SetTemplateId(v string) *GetProfile return s } -type GetWorkflowInput struct { - _ struct{} `type:"structure" nopayload:"true"` +type GetSimilarProfilesInput struct { + _ struct{} `type:"structure"` // The unique name of the domain. // // DomainName is a required field DomainName *string `location:"uri" locationName:"DomainName" min:"1" type:"string" required:"true"` - // Unique identifier for the workflow. + // Specify the type of matching to get similar profiles for. // - // WorkflowId is a required field - WorkflowId *string `location:"uri" locationName:"WorkflowId" type:"string" required:"true"` + // MatchType is a required field + MatchType *string `type:"string" required:"true" enum:"MatchType"` + + // The maximum number of objects returned per page. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The pagination token from the previous GetSimilarProfiles API call. + NextToken *string `location:"querystring" locationName:"next-token" min:"1" type:"string"` + + // The string indicating the search key to be used. 
+ // + // SearchKey is a required field + SearchKey *string `min:"1" type:"string" required:"true"` + + // The string based on SearchKey to be searched for similar profiles. + // + // SearchValue is a required field + SearchValue *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -10541,7 +10910,7 @@ type GetWorkflowInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetWorkflowInput) String() string { +func (s GetSimilarProfilesInput) String() string { return awsutil.Prettify(s) } @@ -10550,24 +10919,39 @@ func (s GetWorkflowInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetWorkflowInput) GoString() string { +func (s GetSimilarProfilesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetWorkflowInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetWorkflowInput"} +func (s *GetSimilarProfilesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSimilarProfilesInput"} if s.DomainName == nil { invalidParams.Add(request.NewErrParamRequired("DomainName")) } if s.DomainName != nil && len(*s.DomainName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) } - if s.WorkflowId == nil { - invalidParams.Add(request.NewErrParamRequired("WorkflowId")) + if s.MatchType == nil { + invalidParams.Add(request.NewErrParamRequired("MatchType")) } - if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.SearchKey == nil { + invalidParams.Add(request.NewErrParamRequired("SearchKey")) + } + if s.SearchKey != nil && len(*s.SearchKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SearchKey", 1)) + } + if s.SearchValue == nil { + invalidParams.Add(request.NewErrParamRequired("SearchValue")) + } + if s.SearchValue != nil && len(*s.SearchValue) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SearchValue", 1)) } if invalidParams.Len() > 0 { @@ -10577,22 +10961,196 @@ func (s *GetWorkflowInput) Validate() error { } // SetDomainName sets the DomainName field's value. -func (s *GetWorkflowInput) SetDomainName(v string) *GetWorkflowInput { +func (s *GetSimilarProfilesInput) SetDomainName(v string) *GetSimilarProfilesInput { s.DomainName = &v return s } -// SetWorkflowId sets the WorkflowId field's value. -func (s *GetWorkflowInput) SetWorkflowId(v string) *GetWorkflowInput { - s.WorkflowId = &v +// SetMatchType sets the MatchType field's value. +func (s *GetSimilarProfilesInput) SetMatchType(v string) *GetSimilarProfilesInput { + s.MatchType = &v return s } -type GetWorkflowOutput struct { - _ struct{} `type:"structure"` +// SetMaxResults sets the MaxResults field's value. +func (s *GetSimilarProfilesInput) SetMaxResults(v int64) *GetSimilarProfilesInput { + s.MaxResults = &v + return s +} - // Attributes provided for workflow execution. 
- Attributes *WorkflowAttributes `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *GetSimilarProfilesInput) SetNextToken(v string) *GetSimilarProfilesInput { + s.NextToken = &v + return s +} + +// SetSearchKey sets the SearchKey field's value. +func (s *GetSimilarProfilesInput) SetSearchKey(v string) *GetSimilarProfilesInput { + s.SearchKey = &v + return s +} + +// SetSearchValue sets the SearchValue field's value. +func (s *GetSimilarProfilesInput) SetSearchValue(v string) *GetSimilarProfilesInput { + s.SearchValue = &v + return s +} + +type GetSimilarProfilesOutput struct { + _ struct{} `type:"structure"` + + // It only has value when the MatchType is ML_BASED_MATCHING.A number between + // 0 and 1, where a higher score means higher similarity. Examining match confidence + // scores lets you distinguish between groups of similar records in which the + // system is highly confident (which you may decide to merge), groups of similar + // records about which the system is uncertain (which you may decide to have + // reviewed by a human), and groups of similar records that the system deems + // to be unlikely (which you may decide to reject). Given confidence scores + // vary as per the data input, it should not be used as an absolute measure + // of matching quality. + ConfidenceScore *float64 `type:"double"` + + // The string matchId that the similar profiles belong to. + MatchId *string `min:"1" type:"string"` + + // Specify the type of matching to get similar profiles for. + MatchType *string `type:"string" enum:"MatchType"` + + // The pagination token from the previous GetSimilarProfiles API call. + NextToken *string `min:"1" type:"string"` + + // Set of profileIds that belong to the same matching group. + ProfileIds []*string `type:"list"` + + // The integer rule level that the profiles matched on. + RuleLevel *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSimilarProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSimilarProfilesOutput) GoString() string { + return s.String() +} + +// SetConfidenceScore sets the ConfidenceScore field's value. +func (s *GetSimilarProfilesOutput) SetConfidenceScore(v float64) *GetSimilarProfilesOutput { + s.ConfidenceScore = &v + return s +} + +// SetMatchId sets the MatchId field's value. +func (s *GetSimilarProfilesOutput) SetMatchId(v string) *GetSimilarProfilesOutput { + s.MatchId = &v + return s +} + +// SetMatchType sets the MatchType field's value. +func (s *GetSimilarProfilesOutput) SetMatchType(v string) *GetSimilarProfilesOutput { + s.MatchType = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetSimilarProfilesOutput) SetNextToken(v string) *GetSimilarProfilesOutput { + s.NextToken = &v + return s +} + +// SetProfileIds sets the ProfileIds field's value. +func (s *GetSimilarProfilesOutput) SetProfileIds(v []*string) *GetSimilarProfilesOutput { + s.ProfileIds = v + return s +} + +// SetRuleLevel sets the RuleLevel field's value. 
+func (s *GetSimilarProfilesOutput) SetRuleLevel(v int64) *GetSimilarProfilesOutput { + s.RuleLevel = &v + return s +} + +type GetWorkflowInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The unique name of the domain. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"1" type:"string" required:"true"` + + // Unique identifier for the workflow. + // + // WorkflowId is a required field + WorkflowId *string `location:"uri" locationName:"WorkflowId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetWorkflowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetWorkflowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWorkflowInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.WorkflowId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowId")) + } + if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *GetWorkflowInput) SetDomainName(v string) *GetWorkflowInput { + s.DomainName = &v + return s +} + +// SetWorkflowId sets the WorkflowId field's value. +func (s *GetWorkflowInput) SetWorkflowId(v string) *GetWorkflowInput { + s.WorkflowId = &v + return s +} + +type GetWorkflowOutput struct { + _ struct{} `type:"structure"` + + // Attributes provided for workflow execution. + Attributes *WorkflowAttributes `type:"structure"` // Workflow error messages during execution (if any). ErrorDescription *string `min:"1" type:"string"` @@ -12879,6 +13437,119 @@ func (s *ListProfileObjectsOutput) SetNextToken(v string) *ListProfileObjectsOut return s } +type ListRuleBasedMatchesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The unique name of the domain. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"1" type:"string" required:"true"` + + // The maximum number of MatchIds returned per page. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The pagination token from the previous ListRuleBasedMatches API call. + NextToken *string `location:"querystring" locationName:"next-token" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
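//
// A rough pagination sketch for this input; the domain name is a placeholder
// and svc is assumed to be a *customerprofiles.CustomerProfiles client.
//
//	input := &customerprofiles.ListRuleBasedMatchesInput{
//		DomainName: aws.String("example-domain"), // placeholder
//		MaxResults: aws.Int64(50),
//	}
//	for {
//		page, err := svc.ListRuleBasedMatches(input)
//		if err != nil {
//			break
//		}
//		fmt.Println(page.MatchIds)
//		if page.NextToken == nil {
//			break
//		}
//		input.NextToken = page.NextToken
//	}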
+func (s ListRuleBasedMatchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRuleBasedMatchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRuleBasedMatchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRuleBasedMatchesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *ListRuleBasedMatchesInput) SetDomainName(v string) *ListRuleBasedMatchesInput { + s.DomainName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListRuleBasedMatchesInput) SetMaxResults(v int64) *ListRuleBasedMatchesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRuleBasedMatchesInput) SetNextToken(v string) *ListRuleBasedMatchesInput { + s.NextToken = &v + return s +} + +type ListRuleBasedMatchesOutput struct { + _ struct{} `type:"structure"` + + // The list of MatchIds for the given domain. + MatchIds []*string `type:"list"` + + // The pagination token from the previous ListRuleBasedMatches API call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRuleBasedMatchesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListRuleBasedMatchesOutput) GoString() string { + return s.String() +} + +// SetMatchIds sets the MatchIds field's value. +func (s *ListRuleBasedMatchesOutput) SetMatchIds(v []*string) *ListRuleBasedMatchesOutput { + s.MatchIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRuleBasedMatchesOutput) SetNextToken(v string) *ListRuleBasedMatchesOutput { + s.NextToken = &v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -13454,6 +14125,92 @@ func (s *MatchingResponse) SetJobSchedule(v *JobSchedule) *MatchingResponse { return s } +// Specifies how does the rule-based matching process should match profiles. 
+// You can choose from the following attributes to build the matching Rule: +// +// - AccountNumber +// +// - Address.Address +// +// - Address.City +// +// - Address.Country +// +// - Address.County +// +// - Address.PostalCode +// +// - Address.State +// +// - Address.Province +// +// - BirthDate +// +// - BusinessName +// +// - EmailAddress +// +// - FirstName +// +// - Gender +// +// - LastName +// +// - MiddleName +// +// - PhoneNumber +// +// - Any customized profile attributes that start with the Attributes +type MatchingRule struct { + _ struct{} `type:"structure"` + + // A single rule level of the MatchRules. Configures how the rule-based matching + // process should match profiles. + // + // Rule is a required field + Rule []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MatchingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MatchingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MatchingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MatchingRule"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.Rule != nil && len(s.Rule) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Rule", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRule sets the Rule field's value. +func (s *MatchingRule) SetRule(v []*string) *MatchingRule { + s.Rule = v + return s +} + type MergeProfilesInput struct { _ struct{} `type:"structure"` @@ -14838,6 +15595,268 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } +// The request to enable the rule-based matching. +type RuleBasedMatchingRequest struct { + _ struct{} `type:"structure"` + + // Configures information about the AttributeTypesSelector where the rule-based + // identity resolution uses to match profiles. + AttributeTypesSelector *AttributeTypesSelector `type:"structure"` + + // How the auto-merging process should resolve conflicts between different profiles. + ConflictResolution *ConflictResolution `type:"structure"` + + // The flag that enables the rule-based matching process of duplicate profiles. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` + + // Configuration information about the S3 bucket where Identity Resolution Jobs + // writes result files. + // + // You need to give Customer Profiles service principal write permission to + // your S3 bucket. Otherwise, you'll get an exception in the API response. For + // an example policy, see Amazon Connect Customer Profiles cross-service confused + // deputy prevention (https://docs.aws.amazon.com/connect/latest/adminguide/cross-service-confused-deputy-prevention.html#customer-profiles-cross-service). + ExportingConfig *ExportingConfig `type:"structure"` + + // Configures how the rule-based matching process should match profiles. 
You + // can have up to 15 MatchingRule in the MatchingRules. + MatchingRules []*MatchingRule `min:"1" type:"list"` + + // Indicates the maximum allowed rule level. + MaxAllowedRuleLevelForMatching *int64 `min:"1" type:"integer"` + + // MatchingRule (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_MatchingRule.html) + MaxAllowedRuleLevelForMerging *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RuleBasedMatchingRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RuleBasedMatchingRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RuleBasedMatchingRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RuleBasedMatchingRequest"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.MatchingRules != nil && len(s.MatchingRules) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MatchingRules", 1)) + } + if s.MaxAllowedRuleLevelForMatching != nil && *s.MaxAllowedRuleLevelForMatching < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxAllowedRuleLevelForMatching", 1)) + } + if s.MaxAllowedRuleLevelForMerging != nil && *s.MaxAllowedRuleLevelForMerging < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxAllowedRuleLevelForMerging", 1)) + } + if s.AttributeTypesSelector != nil { + if err := s.AttributeTypesSelector.Validate(); err != nil { + invalidParams.AddNested("AttributeTypesSelector", err.(request.ErrInvalidParams)) + } + } + if s.ConflictResolution != nil { + if err := s.ConflictResolution.Validate(); err != nil { + invalidParams.AddNested("ConflictResolution", err.(request.ErrInvalidParams)) + } + } + if s.ExportingConfig != nil { + if err := s.ExportingConfig.Validate(); err != nil { + invalidParams.AddNested("ExportingConfig", err.(request.ErrInvalidParams)) + } + } + if s.MatchingRules != nil { + for i, v := range s.MatchingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MatchingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeTypesSelector sets the AttributeTypesSelector field's value. +func (s *RuleBasedMatchingRequest) SetAttributeTypesSelector(v *AttributeTypesSelector) *RuleBasedMatchingRequest { + s.AttributeTypesSelector = v + return s +} + +// SetConflictResolution sets the ConflictResolution field's value. +func (s *RuleBasedMatchingRequest) SetConflictResolution(v *ConflictResolution) *RuleBasedMatchingRequest { + s.ConflictResolution = v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *RuleBasedMatchingRequest) SetEnabled(v bool) *RuleBasedMatchingRequest { + s.Enabled = &v + return s +} + +// SetExportingConfig sets the ExportingConfig field's value. 
+func (s *RuleBasedMatchingRequest) SetExportingConfig(v *ExportingConfig) *RuleBasedMatchingRequest { + s.ExportingConfig = v + return s +} + +// SetMatchingRules sets the MatchingRules field's value. +func (s *RuleBasedMatchingRequest) SetMatchingRules(v []*MatchingRule) *RuleBasedMatchingRequest { + s.MatchingRules = v + return s +} + +// SetMaxAllowedRuleLevelForMatching sets the MaxAllowedRuleLevelForMatching field's value. +func (s *RuleBasedMatchingRequest) SetMaxAllowedRuleLevelForMatching(v int64) *RuleBasedMatchingRequest { + s.MaxAllowedRuleLevelForMatching = &v + return s +} + +// SetMaxAllowedRuleLevelForMerging sets the MaxAllowedRuleLevelForMerging field's value. +func (s *RuleBasedMatchingRequest) SetMaxAllowedRuleLevelForMerging(v int64) *RuleBasedMatchingRequest { + s.MaxAllowedRuleLevelForMerging = &v + return s +} + +// The response of the Rule-based matching request. +type RuleBasedMatchingResponse struct { + _ struct{} `type:"structure"` + + // Configures information about the AttributeTypesSelector where the rule-based + // identity resolution uses to match profiles. + AttributeTypesSelector *AttributeTypesSelector `type:"structure"` + + // How the auto-merging process should resolve conflicts between different profiles. + ConflictResolution *ConflictResolution `type:"structure"` + + // The flag that enables the rule-based matching process of duplicate profiles. + Enabled *bool `type:"boolean"` + + // Configuration information about the S3 bucket where Identity Resolution Jobs + // writes result files. + // + // You need to give Customer Profiles service principal write permission to + // your S3 bucket. Otherwise, you'll get an exception in the API response. For + // an example policy, see Amazon Connect Customer Profiles cross-service confused + // deputy prevention (https://docs.aws.amazon.com/connect/latest/adminguide/cross-service-confused-deputy-prevention.html#customer-profiles-cross-service). + ExportingConfig *ExportingConfig `type:"structure"` + + // Configures how the rule-based matching process should match profiles. You + // can have up to 15 MatchingRule in the MatchingRules. + MatchingRules []*MatchingRule `min:"1" type:"list"` + + // Indicates the maximum allowed rule level. + MaxAllowedRuleLevelForMatching *int64 `min:"1" type:"integer"` + + // MatchingRule (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_MatchingRule.html) + MaxAllowedRuleLevelForMerging *int64 `min:"1" type:"integer"` + + // PENDING + // + // * The first status after configuration a rule-based matching rule. If + // it is an existing domain, the rule-based Identity Resolution waits one + // hour before creating the matching rule. If it is a new domain, the system + // will skip the PENDING stage. + // + // IN_PROGRESS + // + // * The system is creating the rule-based matching rule. Under this status, + // the system is evaluating the existing data and you can no longer change + // the Rule-based matching configuration. + // + // ACTIVE + // + // * The rule is ready to use. You can change the rule a day after the status + // is in ACTIVE. + Status *string `type:"string" enum:"RuleBasedMatchingStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
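//
// In practice this response is read back from domain-level calls such as GetDomain,
// CreateDomain, or UpdateDomain. A minimal, illustrative status check; the domain
// name is a placeholder and the GetDomainInput field layout is assumed rather than
// shown in this hunk.
//
//	domain, err := svc.GetDomain(&customerprofiles.GetDomainInput{
//		DomainName: aws.String("example-domain"), // placeholder
//	})
//	if err == nil && domain.RuleBasedMatching != nil {
//		fmt.Println(aws.StringValue(domain.RuleBasedMatching.Status))
//	}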
+func (s RuleBasedMatchingResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RuleBasedMatchingResponse) GoString() string { + return s.String() +} + +// SetAttributeTypesSelector sets the AttributeTypesSelector field's value. +func (s *RuleBasedMatchingResponse) SetAttributeTypesSelector(v *AttributeTypesSelector) *RuleBasedMatchingResponse { + s.AttributeTypesSelector = v + return s +} + +// SetConflictResolution sets the ConflictResolution field's value. +func (s *RuleBasedMatchingResponse) SetConflictResolution(v *ConflictResolution) *RuleBasedMatchingResponse { + s.ConflictResolution = v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *RuleBasedMatchingResponse) SetEnabled(v bool) *RuleBasedMatchingResponse { + s.Enabled = &v + return s +} + +// SetExportingConfig sets the ExportingConfig field's value. +func (s *RuleBasedMatchingResponse) SetExportingConfig(v *ExportingConfig) *RuleBasedMatchingResponse { + s.ExportingConfig = v + return s +} + +// SetMatchingRules sets the MatchingRules field's value. +func (s *RuleBasedMatchingResponse) SetMatchingRules(v []*MatchingRule) *RuleBasedMatchingResponse { + s.MatchingRules = v + return s +} + +// SetMaxAllowedRuleLevelForMatching sets the MaxAllowedRuleLevelForMatching field's value. +func (s *RuleBasedMatchingResponse) SetMaxAllowedRuleLevelForMatching(v int64) *RuleBasedMatchingResponse { + s.MaxAllowedRuleLevelForMatching = &v + return s +} + +// SetMaxAllowedRuleLevelForMerging sets the MaxAllowedRuleLevelForMerging field's value. +func (s *RuleBasedMatchingResponse) SetMaxAllowedRuleLevelForMerging(v int64) *RuleBasedMatchingResponse { + s.MaxAllowedRuleLevelForMerging = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *RuleBasedMatchingResponse) SetStatus(v string) *RuleBasedMatchingResponse { + s.Status = &v + return s +} + // Configuration information about the S3 bucket where Identity Resolution Jobs // write result files. type S3ExportingConfig struct { @@ -16486,6 +17505,14 @@ type UpdateDomainInput struct { // in the MatchingRequest, you can download the results from S3. Matching *MatchingRequest `type:"structure"` + // The process of matching duplicate profiles using the rule-Based matching. + // If RuleBasedMatching = true, Amazon Connect Customer Profiles will start + // to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + // You can use the ListRuleBasedMatches and GetSimilarProfiles API to return + // and review the results. Also, if you have configured ExportingConfig in the + // RuleBasedMatchingRequest, you can download the results from S3. + RuleBasedMatching *RuleBasedMatchingRequest `type:"structure"` + // The tags used to organize, track, or control access for this resource. 
Tags map[string]*string `min:"1" type:"map"` } @@ -16528,6 +17555,11 @@ func (s *UpdateDomainInput) Validate() error { invalidParams.AddNested("Matching", err.(request.ErrInvalidParams)) } } + if s.RuleBasedMatching != nil { + if err := s.RuleBasedMatching.Validate(); err != nil { + invalidParams.AddNested("RuleBasedMatching", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -16565,6 +17597,12 @@ func (s *UpdateDomainInput) SetMatching(v *MatchingRequest) *UpdateDomainInput { return s } +// SetRuleBasedMatching sets the RuleBasedMatching field's value. +func (s *UpdateDomainInput) SetRuleBasedMatching(v *RuleBasedMatchingRequest) *UpdateDomainInput { + s.RuleBasedMatching = v + return s +} + // SetTags sets the Tags field's value. func (s *UpdateDomainInput) SetTags(v map[string]*string) *UpdateDomainInput { s.Tags = v @@ -16612,6 +17650,14 @@ type UpdateDomainOutput struct { // in the MatchingRequest, you can download the results from S3. Matching *MatchingResponse `type:"structure"` + // The process of matching duplicate profiles using the rule-Based matching. + // If RuleBasedMatching = true, Amazon Connect Customer Profiles will start + // to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + // You can use the ListRuleBasedMatches and GetSimilarProfiles API to return + // and review the results. Also, if you have configured ExportingConfig in the + // RuleBasedMatchingRequest, you can download the results from S3. + RuleBasedMatching *RuleBasedMatchingResponse `type:"structure"` + // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` } @@ -16676,6 +17722,12 @@ func (s *UpdateDomainOutput) SetMatching(v *MatchingResponse) *UpdateDomainOutpu return s } +// SetRuleBasedMatching sets the RuleBasedMatching field's value. +func (s *UpdateDomainOutput) SetRuleBasedMatching(v *RuleBasedMatchingResponse) *UpdateDomainOutput { + s.RuleBasedMatching = v + return s +} + // SetTags sets the Tags field's value. 
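//
// Taken together, the UpdateDomain additions above let a caller switch on
// rule-based matching in a single request. A condensed sketch, assuming the
// usual DomainName field on UpdateDomainInput; the domain name and rule
// attributes are illustrative.
//
//	out, err := svc.UpdateDomain(&customerprofiles.UpdateDomainInput{
//		DomainName: aws.String("example-domain"), // placeholder
//		RuleBasedMatching: &customerprofiles.RuleBasedMatchingRequest{
//			Enabled: aws.Bool(true),
//			MatchingRules: []*customerprofiles.MatchingRule{
//				{Rule: []*string{aws.String("EmailAddress"), aws.String("LastName")}},
//			},
//		},
//	})
//	if err == nil && out.RuleBasedMatching != nil {
//		fmt.Println(aws.StringValue(out.RuleBasedMatching.Status))
//	}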
func (s *UpdateDomainOutput) SetTags(v map[string]*string) *UpdateDomainOutput { s.Tags = v @@ -17131,6 +18183,22 @@ func (s *ZendeskSourceProperties) SetObject(v string) *ZendeskSourceProperties { return s } +const ( + // AttributeMatchingModelOneToOne is a AttributeMatchingModel enum value + AttributeMatchingModelOneToOne = "ONE_TO_ONE" + + // AttributeMatchingModelManyToMany is a AttributeMatchingModel enum value + AttributeMatchingModelManyToMany = "MANY_TO_MANY" +) + +// AttributeMatchingModel_Values returns all elements of the AttributeMatchingModel enum +func AttributeMatchingModel_Values() []string { + return []string{ + AttributeMatchingModelOneToOne, + AttributeMatchingModelManyToMany, + } +} + const ( // ConflictResolvingModelRecency is a ConflictResolvingModel enum value ConflictResolvingModelRecency = "RECENCY" @@ -17403,6 +18471,22 @@ func MarketoConnectorOperator_Values() []string { } } +const ( + // MatchTypeRuleBasedMatching is a MatchType enum value + MatchTypeRuleBasedMatching = "RULE_BASED_MATCHING" + + // MatchTypeMlBasedMatching is a MatchType enum value + MatchTypeMlBasedMatching = "ML_BASED_MATCHING" +) + +// MatchType_Values returns all elements of the MatchType enum +func MatchType_Values() []string { + return []string{ + MatchTypeRuleBasedMatching, + MatchTypeMlBasedMatching, + } +} + const ( // OperatorEqualTo is a Operator enum value OperatorEqualTo = "EQUAL_TO" @@ -17511,6 +18595,26 @@ func PartyType_Values() []string { } } +const ( + // RuleBasedMatchingStatusPending is a RuleBasedMatchingStatus enum value + RuleBasedMatchingStatusPending = "PENDING" + + // RuleBasedMatchingStatusInProgress is a RuleBasedMatchingStatus enum value + RuleBasedMatchingStatusInProgress = "IN_PROGRESS" + + // RuleBasedMatchingStatusActive is a RuleBasedMatchingStatus enum value + RuleBasedMatchingStatusActive = "ACTIVE" +) + +// RuleBasedMatchingStatus_Values returns all elements of the RuleBasedMatchingStatus enum +func RuleBasedMatchingStatus_Values() []string { + return []string{ + RuleBasedMatchingStatusPending, + RuleBasedMatchingStatusInProgress, + RuleBasedMatchingStatusActive, + } +} + const ( // S3ConnectorOperatorProjection is a S3ConnectorOperator enum value S3ConnectorOperatorProjection = "PROJECTION" diff --git a/service/customerprofiles/customerprofilesiface/interface.go b/service/customerprofiles/customerprofilesiface/interface.go index f920da41674..f027762557f 100644 --- a/service/customerprofiles/customerprofilesiface/interface.go +++ b/service/customerprofiles/customerprofilesiface/interface.go @@ -160,6 +160,10 @@ type CustomerProfilesAPI interface { GetProfileObjectTypeTemplateWithContext(aws.Context, *customerprofiles.GetProfileObjectTypeTemplateInput, ...request.Option) (*customerprofiles.GetProfileObjectTypeTemplateOutput, error) GetProfileObjectTypeTemplateRequest(*customerprofiles.GetProfileObjectTypeTemplateInput) (*request.Request, *customerprofiles.GetProfileObjectTypeTemplateOutput) + GetSimilarProfiles(*customerprofiles.GetSimilarProfilesInput) (*customerprofiles.GetSimilarProfilesOutput, error) + GetSimilarProfilesWithContext(aws.Context, *customerprofiles.GetSimilarProfilesInput, ...request.Option) (*customerprofiles.GetSimilarProfilesOutput, error) + GetSimilarProfilesRequest(*customerprofiles.GetSimilarProfilesInput) (*request.Request, *customerprofiles.GetSimilarProfilesOutput) + GetWorkflow(*customerprofiles.GetWorkflowInput) (*customerprofiles.GetWorkflowOutput, error) GetWorkflowWithContext(aws.Context, *customerprofiles.GetWorkflowInput, 
...request.Option) (*customerprofiles.GetWorkflowOutput, error) GetWorkflowRequest(*customerprofiles.GetWorkflowInput) (*request.Request, *customerprofiles.GetWorkflowOutput) @@ -211,6 +215,10 @@ type CustomerProfilesAPI interface { ListProfileObjectsWithContext(aws.Context, *customerprofiles.ListProfileObjectsInput, ...request.Option) (*customerprofiles.ListProfileObjectsOutput, error) ListProfileObjectsRequest(*customerprofiles.ListProfileObjectsInput) (*request.Request, *customerprofiles.ListProfileObjectsOutput) + ListRuleBasedMatches(*customerprofiles.ListRuleBasedMatchesInput) (*customerprofiles.ListRuleBasedMatchesOutput, error) + ListRuleBasedMatchesWithContext(aws.Context, *customerprofiles.ListRuleBasedMatchesInput, ...request.Option) (*customerprofiles.ListRuleBasedMatchesOutput, error) + ListRuleBasedMatchesRequest(*customerprofiles.ListRuleBasedMatchesInput) (*request.Request, *customerprofiles.ListRuleBasedMatchesOutput) + ListTagsForResource(*customerprofiles.ListTagsForResourceInput) (*customerprofiles.ListTagsForResourceOutput, error) ListTagsForResourceWithContext(aws.Context, *customerprofiles.ListTagsForResourceInput, ...request.Option) (*customerprofiles.ListTagsForResourceOutput, error) ListTagsForResourceRequest(*customerprofiles.ListTagsForResourceInput) (*request.Request, *customerprofiles.ListTagsForResourceOutput) diff --git a/service/datasync/api.go b/service/datasync/api.go index 4d3b642696f..134cf5658a3 100644 --- a/service/datasync/api.go +++ b/service/datasync/api.go @@ -282,6 +282,96 @@ func (c *DataSync) CreateAgentWithContext(ctx aws.Context, input *CreateAgentInp return out, req.Send() } +const opCreateLocationAzureBlob = "CreateLocationAzureBlob" + +// CreateLocationAzureBlobRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationAzureBlob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationAzureBlob for more information on using the CreateLocationAzureBlob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateLocationAzureBlobRequest method. +// req, resp := client.CreateLocationAzureBlobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationAzureBlob +func (c *DataSync) CreateLocationAzureBlobRequest(input *CreateLocationAzureBlobInput) (req *request.Request, output *CreateLocationAzureBlobOutput) { + op := &request.Operation{ + Name: opCreateLocationAzureBlob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationAzureBlobInput{} + } + + output = &CreateLocationAzureBlobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationAzureBlob API operation for AWS DataSync. +// +// Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync +// can use as a transfer source or destination. 
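//
// As a rough sketch only: a request typically supplies the container URL, a
// DataSync agent, and SAS-based authentication. The input field names below
// are recalled from the service API rather than shown in this hunk, and every
// value is a placeholder, so treat the whole snippet as an assumption.
//
//	dsSvc := datasync.New(session.Must(session.NewSession()))
//	loc, err := dsSvc.CreateLocationAzureBlob(&datasync.CreateLocationAzureBlobInput{
//		ContainerUrl:       aws.String("https://example.blob.core.windows.net/example-container"), // assumed field/value
//		AuthenticationType: aws.String("SAS"),                                                      // assumed field/value
//		SasConfiguration:   &datasync.AzureBlobSasConfiguration{Token: aws.String("sp=r&sig=example")}, // placeholder token
//		AgentArns:          []*string{aws.String("arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0")}, // placeholder
//	})
//	if err == nil {
//		fmt.Println(aws.StringValue(loc.LocationArn)) // LocationArn assumed on the output shape
//	}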
+// +// Before you begin, make sure you know how DataSync accesses Azure Blob Storage +// (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-access) +// and works with access tiers (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-access-tiers) +// and blob types (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#blob-types). +// You also need a DataSync agent (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-creating-agent) +// that can connect to your container. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationAzureBlob for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// - InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationAzureBlob +func (c *DataSync) CreateLocationAzureBlob(input *CreateLocationAzureBlobInput) (*CreateLocationAzureBlobOutput, error) { + req, out := c.CreateLocationAzureBlobRequest(input) + return out, req.Send() +} + +// CreateLocationAzureBlobWithContext is the same as CreateLocationAzureBlob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationAzureBlob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) CreateLocationAzureBlobWithContext(ctx aws.Context, input *CreateLocationAzureBlobInput, opts ...request.Option) (*CreateLocationAzureBlobOutput, error) { + req, out := c.CreateLocationAzureBlobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateLocationEfs = "CreateLocationEfs" // CreateLocationEfsRequest generates a "aws/request.Request" representing the @@ -826,8 +916,8 @@ func (c *DataSync) CreateLocationNfsRequest(input *CreateLocationNfsInput) (req // CreateLocationNfs API operation for AWS DataSync. // -// Defines a file system on a Network File System (NFS) server that can be read -// from or written to. +// Creates an endpoint for an Network File System (NFS) file server that DataSync +// can use for a data transfer. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1085,8 +1175,10 @@ func (c *DataSync) CreateLocationSmbRequest(input *CreateLocationSmbInput) (req // CreateLocationSmb API operation for AWS DataSync. // // Creates an endpoint for a Server Message Block (SMB) file server that DataSync -// can access for a transfer. For more information, see Creating an SMB location -// (https://docs.aws.amazon.com/datasync/latest/userguide/create-smb-location.html). +// can use for a data transfer. 
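//
// Loosely, and with every value below a placeholder (the input field names are
// recalled from this API's shape, which is outside this hunk, so treat them as
// assumptions; dsSvc is a *datasync.DataSync client):
//
//	_, err := dsSvc.CreateLocationSmb(&datasync.CreateLocationSmbInput{
//		ServerHostname: aws.String("smb.example.com"),  // assumed field/value
//		Subdirectory:   aws.String("/share/export"),    // assumed field/value
//		User:           aws.String("example-user"),     // assumed field/value
//		Password:       aws.String("example-password"), // assumed field/value
//		AgentArns:      []*string{aws.String("arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0")}, // placeholder
//	})
//	if err != nil {
//		fmt.Println(err)
//	}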
+// +// Before you begin, make sure that you understand how DataSync accesses an +// SMB file server (https://docs.aws.amazon.com/datasync/latest/userguide/create-smb-location.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1635,6 +1727,89 @@ func (c *DataSync) DescribeDiscoveryJobWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeLocationAzureBlob = "DescribeLocationAzureBlob" + +// DescribeLocationAzureBlobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationAzureBlob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocationAzureBlob for more information on using the DescribeLocationAzureBlob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeLocationAzureBlobRequest method. +// req, resp := client.DescribeLocationAzureBlobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationAzureBlob +func (c *DataSync) DescribeLocationAzureBlobRequest(input *DescribeLocationAzureBlobInput) (req *request.Request, output *DescribeLocationAzureBlobOutput) { + op := &request.Operation{ + Name: opDescribeLocationAzureBlob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationAzureBlobInput{} + } + + output = &DescribeLocationAzureBlobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationAzureBlob API operation for AWS DataSync. +// +// Provides details about how an DataSync transfer location for Microsoft Azure +// Blob Storage is configured. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationAzureBlob for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// - InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationAzureBlob +func (c *DataSync) DescribeLocationAzureBlob(input *DescribeLocationAzureBlobInput) (*DescribeLocationAzureBlobOutput, error) { + req, out := c.DescribeLocationAzureBlobRequest(input) + return out, req.Send() +} + +// DescribeLocationAzureBlobWithContext is the same as DescribeLocationAzureBlob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationAzureBlob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) DescribeLocationAzureBlobWithContext(ctx aws.Context, input *DescribeLocationAzureBlobInput, opts ...request.Option) (*DescribeLocationAzureBlobOutput, error) { + req, out := c.DescribeLocationAzureBlobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLocationEfs = "DescribeLocationEfs" // DescribeLocationEfsRequest generates a "aws/request.Request" representing the @@ -2879,7 +3054,7 @@ func (c *DataSync) DescribeTaskRequest(input *DescribeTaskInput) (req *request.R // DescribeTask API operation for AWS DataSync. // -// Returns metadata about a task. +// Provides information about an DataSync transfer task. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2961,7 +3136,7 @@ func (c *DataSync) DescribeTaskExecutionRequest(input *DescribeTaskExecutionInpu // DescribeTaskExecution API operation for AWS DataSync. // -// Returns detailed metadata about a task that is being executed. +// Provides information about an DataSync transfer task that's running. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4784,6 +4959,90 @@ func (c *DataSync) UpdateDiscoveryJobWithContext(ctx aws.Context, input *UpdateD return out, req.Send() } +const opUpdateLocationAzureBlob = "UpdateLocationAzureBlob" + +// UpdateLocationAzureBlobRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLocationAzureBlob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateLocationAzureBlob for more information on using the UpdateLocationAzureBlob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateLocationAzureBlobRequest method. +// req, resp := client.UpdateLocationAzureBlobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateLocationAzureBlob +func (c *DataSync) UpdateLocationAzureBlobRequest(input *UpdateLocationAzureBlobInput) (req *request.Request, output *UpdateLocationAzureBlobOutput) { + op := &request.Operation{ + Name: opUpdateLocationAzureBlob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLocationAzureBlobInput{} + } + + output = &UpdateLocationAzureBlobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateLocationAzureBlob API operation for AWS DataSync. +// +// Modifies some configurations of the Microsoft Azure Blob Storage transfer +// location that you're using with DataSync. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation UpdateLocationAzureBlob for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// - InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateLocationAzureBlob +func (c *DataSync) UpdateLocationAzureBlob(input *UpdateLocationAzureBlobInput) (*UpdateLocationAzureBlobOutput, error) { + req, out := c.UpdateLocationAzureBlobRequest(input) + return out, req.Send() +} + +// UpdateLocationAzureBlobWithContext is the same as UpdateLocationAzureBlob with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLocationAzureBlob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) UpdateLocationAzureBlobWithContext(ctx aws.Context, input *UpdateLocationAzureBlobInput, opts ...request.Option) (*UpdateLocationAzureBlobOutput, error) { + req, out := c.UpdateLocationAzureBlobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateLocationHdfs = "UpdateLocationHdfs" // UpdateLocationHdfsRequest generates a "aws/request.Request" representing the @@ -5384,6 +5643,7 @@ type AddStorageSystemInput struct { // Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects // to and reads from your on-premises storage system's management interface. + // You can only specify one ARN. // // AgentArns is a required field AgentArns []*string `min:"1" type:"list" required:"true"` @@ -5628,6 +5888,70 @@ func (s *AgentListEntry) SetStatus(v string) *AgentListEntry { return s } +// The shared access signature (SAS) configuration that allows DataSync to access +// your Microsoft Azure Blob Storage. +// +// For more information, see SAS tokens (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-sas-tokens) +// for accessing your Azure Blob Storage. +type AzureBlobSasConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a SAS token that provides permissions at the Azure storage account, + // container, or folder level. + // + // The token is part of the SAS URI string that comes after the storage resource + // URI and a question mark. A token looks something like this: + // + // sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D + // + // Token is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AzureBlobSasConfiguration's + // String and GoString methods. + // + // Token is a required field + Token *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AzureBlobSasConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AzureBlobSasConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AzureBlobSasConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AzureBlobSasConfiguration"} + if s.Token == nil { + invalidParams.Add(request.NewErrParamRequired("Token")) + } + if s.Token != nil && len(*s.Token) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Token", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetToken sets the Token field's value. +func (s *AzureBlobSasConfiguration) SetToken(v string) *AzureBlobSasConfiguration { + s.Token = &v + return s +} + // CancelTaskExecutionRequest type CancelTaskExecutionInput struct { _ struct{} `type:"structure"` @@ -5765,12 +6089,13 @@ type CreateAgentInput struct { // Specifies the Amazon Resource Name (ARN) of the security group that protects // your task's network interfaces (https://docs.aws.amazon.com/datasync/latest/userguide/datasync-network.html#required-network-interfaces) // when using a virtual private cloud (VPC) endpoint (https://docs.aws.amazon.com/datasync/latest/userguide/choose-service-endpoint.html#choose-service-endpoint-vpc). + // You can only specify one ARN. SecurityGroupArns []*string `min:"1" type:"list"` // Specifies the ARN of the subnet where you want to run your DataSync task // when using a VPC endpoint. This is the subnet where DataSync creates and // manages the network interfaces (https://docs.aws.amazon.com/datasync/latest/userguide/datasync-network.html#required-network-interfaces) - // for your transfer. + // for your transfer. You can only specify one ARN. SubnetArns []*string `min:"1" type:"list"` // Specifies labels that help you categorize, filter, and search for your Amazon @@ -5860,25 +6185,208 @@ func (s *CreateAgentInput) SetSubnetArns(v []*string) *CreateAgentInput { } // SetTags sets the Tags field's value. -func (s *CreateAgentInput) SetTags(v []*TagListEntry) *CreateAgentInput { +func (s *CreateAgentInput) SetTags(v []*TagListEntry) *CreateAgentInput { + s.Tags = v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *CreateAgentInput) SetVpcEndpointId(v string) *CreateAgentInput { + s.VpcEndpointId = &v + return s +} + +// CreateAgentResponse +type CreateAgentOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the agent that you just activated. Use the ListAgents (https://docs.aws.amazon.com/datasync/latest/userguide/API_ListAgents.html) + // operation to return a list of agents in your Amazon Web Services account + // and Amazon Web Services Region. + AgentArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateAgentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateAgentOutput) GoString() string { + return s.String() +} + +// SetAgentArn sets the AgentArn field's value. +func (s *CreateAgentOutput) SetAgentArn(v string) *CreateAgentOutput { + s.AgentArn = &v + return s +} + +type CreateLocationAzureBlobInput struct { + _ struct{} `type:"structure"` + + // Specifies the access tier that you want your objects or files transferred + // into. This only applies when using the location as a transfer destination. + // For more information, see Access tiers (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-access-tiers). + AccessTier *string `type:"string" enum:"AzureAccessTier"` + + // Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect + // with your Azure Blob Storage container. + // + // You can specify more than one agent. For more information, see Using multiple + // agents for your transfer (https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html). + // + // AgentArns is a required field + AgentArns []*string `min:"1" type:"list" required:"true"` + + // Specifies the authentication method DataSync uses to access your Azure Blob + // Storage. DataSync can access blob storage using a shared access signature + // (SAS). + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AzureBlobAuthenticationType"` + + // Specifies the type of blob that you want your objects or files to be when + // transferring them into Azure Blob Storage. Currently, DataSync only supports + // moving data into Azure Blob Storage as block blobs. For more information + // on blob types, see the Azure Blob Storage documentation (https://learn.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + BlobType *string `type:"string" enum:"AzureBlobType"` + + // Specifies the URL of the Azure Blob Storage container involved in your transfer. + // + // ContainerUrl is a required field + ContainerUrl *string `type:"string" required:"true"` + + // Specifies the SAS configuration that allows DataSync to access your Azure + // Blob Storage. + SasConfiguration *AzureBlobSasConfiguration `type:"structure"` + + // Specifies path segments if you want to limit your transfer to a virtual directory + // in your container (for example, /my/images). + Subdirectory *string `type:"string"` + + // Specifies labels that help you categorize, filter, and search for your Amazon + // Web Services resources. We recommend creating at least a name tag for your + // transfer location. + Tags []*TagListEntry `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationAzureBlobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s CreateLocationAzureBlobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocationAzureBlobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationAzureBlobInput"} + if s.AgentArns == nil { + invalidParams.Add(request.NewErrParamRequired("AgentArns")) + } + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.ContainerUrl == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerUrl")) + } + if s.SasConfiguration != nil { + if err := s.SasConfiguration.Validate(); err != nil { + invalidParams.AddNested("SasConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *CreateLocationAzureBlobInput) SetAccessTier(v string) *CreateLocationAzureBlobInput { + s.AccessTier = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationAzureBlobInput) SetAgentArns(v []*string) *CreateLocationAzureBlobInput { + s.AgentArns = v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *CreateLocationAzureBlobInput) SetAuthenticationType(v string) *CreateLocationAzureBlobInput { + s.AuthenticationType = &v + return s +} + +// SetBlobType sets the BlobType field's value. +func (s *CreateLocationAzureBlobInput) SetBlobType(v string) *CreateLocationAzureBlobInput { + s.BlobType = &v + return s +} + +// SetContainerUrl sets the ContainerUrl field's value. +func (s *CreateLocationAzureBlobInput) SetContainerUrl(v string) *CreateLocationAzureBlobInput { + s.ContainerUrl = &v + return s +} + +// SetSasConfiguration sets the SasConfiguration field's value. +func (s *CreateLocationAzureBlobInput) SetSasConfiguration(v *AzureBlobSasConfiguration) *CreateLocationAzureBlobInput { + s.SasConfiguration = v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *CreateLocationAzureBlobInput) SetSubdirectory(v string) *CreateLocationAzureBlobInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationAzureBlobInput) SetTags(v []*TagListEntry) *CreateLocationAzureBlobInput { s.Tags = v return s } -// SetVpcEndpointId sets the VpcEndpointId field's value. -func (s *CreateAgentInput) SetVpcEndpointId(v string) *CreateAgentInput { - s.VpcEndpointId = &v - return s -} - -// CreateAgentResponse -type CreateAgentOutput struct { +type CreateLocationAzureBlobOutput struct { _ struct{} `type:"structure"` - // The ARN of the agent that you just activated. Use the ListAgents (https://docs.aws.amazon.com/datasync/latest/userguide/API_ListAgents.html) - // operation to return a list of agents in your Amazon Web Services account - // and Amazon Web Services Region. - AgentArn *string `type:"string"` + // The ARN of the Azure Blob Storage transfer location that you created. + LocationArn *string `type:"string"` } // String returns the string representation. 
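// A minimal usage sketch for the new Azure Blob Storage location operations added
// in this release. It is illustrative only and not part of the generated SDK; the
// agent ARN, container URL, and SAS token below are placeholder values.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/datasync"
//	)
//
//	func main() {
//		// Create a DataSync client from a default session.
//		svc := datasync.New(session.Must(session.NewSession()))
//
//		// Register an Azure Blob Storage container as a transfer location,
//		// authenticating with a shared access signature (SAS) token.
//		out, err := svc.CreateLocationAzureBlob(&datasync.CreateLocationAzureBlobInput{
//			ContainerUrl:       aws.String("https://myaccount.blob.core.windows.net/my-container"),
//			AgentArns:          aws.StringSlice([]string{"arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0"}),
//			AuthenticationType: aws.String(datasync.AzureBlobAuthenticationTypeSas),
//			SasConfiguration: &datasync.AzureBlobSasConfiguration{
//				Token: aws.String("sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=EXAMPLE"),
//			},
//			BlobType:   aws.String(datasync.AzureBlobTypeBlock),
//			AccessTier: aws.String(datasync.AzureAccessTierHot),
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(aws.StringValue(out.LocationArn))
//	}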
@@ -5886,7 +6394,7 @@ type CreateAgentOutput struct {
 // API parameter values that are decorated as "sensitive" in the API will not
 // be included in the string output. The member name will be present, but the
 // value will be replaced with "sensitive".
-func (s CreateAgentOutput) String() string {
+func (s CreateLocationAzureBlobOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
@@ -5895,13 +6403,13 @@ func (s CreateAgentOutput) String() string {
 // GoString returns the string representation.
 //
 // API parameter values that are decorated as "sensitive" in the API will not
 // be included in the string output. The member name will be present, but the
 // value will be replaced with "sensitive".
-func (s CreateAgentOutput) GoString() string {
+func (s CreateLocationAzureBlobOutput) GoString() string {
 	return s.String()
 }
 
-// SetAgentArn sets the AgentArn field's value.
-func (s *CreateAgentOutput) SetAgentArn(v string) *CreateAgentOutput {
-	s.AgentArn = &v
+// SetLocationArn sets the LocationArn field's value.
+func (s *CreateLocationAzureBlobOutput) SetLocationArn(v string) *CreateLocationAzureBlobOutput {
+	s.LocationArn = &v
 	return s
 }
 
@@ -6965,11 +7473,11 @@ func (s *CreateLocationHdfsOutput) SetLocationArn(v string) *CreateLocationHdfsO
 type CreateLocationNfsInput struct {
 	_ struct{} `type:"structure"`
 
-	// The NFS mount options that DataSync can use to mount your NFS share.
+	// Specifies the mount options that DataSync can use to mount your NFS share.
 	MountOptions *NfsMountOptions `type:"structure"`
 
-	// Contains a list of Amazon Resource Names (ARNs) of agents that are used to
-	// connect to an NFS server.
+	// Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses to
+	// connect to your NFS file server.
 	//
 	// If you are copying data to or from your Snowcone device, see NFS Server on
 	// Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone)
@@ -6978,25 +7486,24 @@ type CreateLocationNfsInput struct {
 	// OnPremConfig is a required field
 	OnPremConfig *OnPremConfig `type:"structure" required:"true"`
 
-	// The name of the NFS server. This value is the IP address or Domain Name Service
-	// (DNS) name of the NFS server. An agent that is installed on-premises uses
-	// this hostname to mount the NFS server in a network.
+	// Specifies the IP address or domain name of your NFS file server. An agent
+	// that is installed on-premises uses this hostname to mount the NFS server
+	// in a network.
 	//
 	// If you are copying data to or from your Snowcone device, see NFS Server on
 	// Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone)
 	// for more information.
 	//
-	// This name must either be DNS-compliant or must be an IP version 4 (IPv4)
-	// address.
+	// You must specify an IP version 4 address or a Domain Name System (DNS)-compliant
+	// name.
 	//
 	// ServerHostname is a required field
 	ServerHostname *string `type:"string" required:"true"`
 
-	// The subdirectory in the NFS file system that is used to read data from the
-	// NFS source location or write data to the NFS destination. The NFS path should
-	// be a path that's exported by the NFS server, or a subdirectory of that path.
-	// The path should be such that it can be mounted by other NFS clients in your
-	// network.
+	// Specifies the subdirectory in the NFS file server that DataSync transfers
+	// to or from. The NFS path should be a path that's exported by the NFS server,
+	// or a subdirectory of that path.
The path should be such that it can be mounted + // by other NFS clients in your network. // // To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" // from an NFS client that has access to your server. You can specify any directory @@ -7014,14 +7521,12 @@ type CreateLocationNfsInput struct { // Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) // for more information. // - // For information about NFS export configuration, see 18.7. The /etc/exports - // Configuration File in the Red Hat Enterprise Linux documentation. - // // Subdirectory is a required field Subdirectory *string `type:"string" required:"true"` - // The key-value pair that represents the tag that you want to add to the location. - // The value can be an empty string. We recommend using tags to name your resources. + // Specifies labels that help you categorize, filter, and search for your Amazon + // Web Services resources. We recommend creating at least a name tag for your + // location. Tags []*TagListEntry `type:"list"` } @@ -7111,8 +7616,7 @@ func (s *CreateLocationNfsInput) SetTags(v []*TagListEntry) *CreateLocationNfsIn type CreateLocationNfsOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the source NFS file system location that - // is created. + // The ARN of the transfer location that you created for your NFS file server. LocationArn *string `type:"string"` } @@ -8477,6 +8981,145 @@ func (s *DescribeDiscoveryJobOutput) SetStorageSystemArn(v string) *DescribeDisc return s } +type DescribeLocationAzureBlobInput struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage transfer + // location. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationAzureBlobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationAzureBlobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocationAzureBlobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationAzureBlobInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationAzureBlobInput) SetLocationArn(v string) *DescribeLocationAzureBlobInput { + s.LocationArn = &v + return s +} + +type DescribeLocationAzureBlobOutput struct { + _ struct{} `type:"structure"` + + // The access tier that you want your objects or files transferred into. This + // only applies when using the location as a transfer destination. For more + // information, see Access tiers (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-access-tiers). 
+ AccessTier *string `type:"string" enum:"AzureAccessTier"` + + // The ARNs of the DataSync agents that can connect with your Azure Blob Storage + // container. + AgentArns []*string `min:"1" type:"list"` + + // The authentication method DataSync uses to access your Azure Blob Storage. + // DataSync can access blob storage using a shared access signature (SAS). + AuthenticationType *string `type:"string" enum:"AzureBlobAuthenticationType"` + + // The type of blob that you want your objects or files to be when transferring + // them into Azure Blob Storage. Currently, DataSync only supports moving data + // into Azure Blob Storage as block blobs. For more information on blob types, + // see the Azure Blob Storage documentation (https://learn.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + BlobType *string `type:"string" enum:"AzureBlobType"` + + // The time that your Azure Blob Storage transfer location was created. + CreationTime *time.Time `type:"timestamp"` + + // The ARN of your Azure Blob Storage transfer location. + LocationArn *string `type:"string"` + + // The URL of the Azure Blob Storage container involved in your transfer. + LocationUri *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationAzureBlobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationAzureBlobOutput) GoString() string { + return s.String() +} + +// SetAccessTier sets the AccessTier field's value. +func (s *DescribeLocationAzureBlobOutput) SetAccessTier(v string) *DescribeLocationAzureBlobOutput { + s.AccessTier = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. +func (s *DescribeLocationAzureBlobOutput) SetAgentArns(v []*string) *DescribeLocationAzureBlobOutput { + s.AgentArns = v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DescribeLocationAzureBlobOutput) SetAuthenticationType(v string) *DescribeLocationAzureBlobOutput { + s.AuthenticationType = &v + return s +} + +// SetBlobType sets the BlobType field's value. +func (s *DescribeLocationAzureBlobOutput) SetBlobType(v string) *DescribeLocationAzureBlobOutput { + s.BlobType = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationAzureBlobOutput) SetCreationTime(v time.Time) *DescribeLocationAzureBlobOutput { + s.CreationTime = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationAzureBlobOutput) SetLocationArn(v string) *DescribeLocationAzureBlobOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationAzureBlobOutput) SetLocationUri(v string) *DescribeLocationAzureBlobOutput { + s.LocationUri = &v + return s +} + // DescribeLocationEfsRequest type DescribeLocationEfsInput struct { _ struct{} `type:"structure"` @@ -9340,7 +9983,7 @@ type DescribeLocationNfsOutput struct { // The URL of the source NFS location that was described. 
LocationUri *string `type:"string"` - // The NFS mount options that DataSync used to mount your NFS share. + // The mount options that DataSync uses to mount your NFS share. MountOptions *NfsMountOptions `type:"structure"` // A list of Amazon Resource Names (ARNs) of agents to use for a Network File @@ -10305,7 +10948,7 @@ func (s *DescribeStorageSystemResourcesOutput) SetResourceDetails(v *ResourceDet type DescribeTaskExecutionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the task that is being executed. + // Specifies the Amazon Resource Name (ARN) of the transfer task that's running. // // TaskExecutionArn is a required field TaskExecutionArn *string `type:"string" required:"true"` @@ -10528,7 +11171,7 @@ func (s *DescribeTaskExecutionOutput) SetTaskExecutionArn(v string) *DescribeTas type DescribeTaskInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the task to describe. + // Specifies the Amazon Resource Name (ARN) of the transfer task. // // TaskArn is a required field TaskArn *string `type:"string" required:"true"` @@ -13546,29 +14189,25 @@ func (s *P95Metrics) SetThroughput(v *Throughput) *P95Metrics { return s } -// The VPC endpoint, subnet, and security group that an agent uses to access -// IP addresses in a VPC (Virtual Private Cloud). +// Specifies how your DataSync agent connects to Amazon Web Services using a +// virtual private cloud (VPC) service endpoint. An agent that uses a VPC endpoint +// isn't accessible over the public internet. type PrivateLinkConfig struct { _ struct{} `type:"structure"` - // The private endpoint that is configured for an agent that has access to IP - // addresses in a PrivateLink (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html). - // An agent that is configured with this endpoint will not be accessible over - // the public internet. + // Specifies the VPC endpoint provided by Amazon Web Services PrivateLink (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html) + // that your agent connects to. PrivateLinkEndpoint *string `min:"7" type:"string"` - // The Amazon Resource Names (ARNs) of the security groups that are configured - // for the EC2 resource that hosts an agent activated in a VPC or an agent that - // has access to a VPC endpoint. + // Specifies the Amazon Resource Names (ARN) of the security group that provides + // DataSync access to your VPC endpoint. You can only specify one ARN. SecurityGroupArns []*string `min:"1" type:"list"` - // The Amazon Resource Names (ARNs) of the subnets that are configured for an - // agent activated in a VPC or an agent that has access to a VPC endpoint. + // Specifies the ARN of the subnet where your VPC endpoint is located. You can + // only specify one ARN. SubnetArns []*string `min:"1" type:"list"` - // The ID of the VPC endpoint that is configured for an agent. An agent that - // is configured with a VPC endpoint will not be accessible over the public - // internet. + // Specifies the ID of the VPC endpoint that your agent connects to. VpcEndpointId *string `type:"string"` } @@ -15218,6 +15857,150 @@ func (s UpdateDiscoveryJobOutput) GoString() string { return s.String() } +type UpdateLocationAzureBlobInput struct { + _ struct{} `type:"structure"` + + // Specifies the access tier that you want your objects or files transferred + // into. This only applies when using the location as a transfer destination. 
+ // For more information, see Access tiers (https://docs.aws.amazon.com/datasync/latest/userguide/creating-azure-blob-location.html#azure-blob-access-tiers). + AccessTier *string `type:"string" enum:"AzureAccessTier"` + + // Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect + // with your Azure Blob Storage container. + // + // You can specify more than one agent. For more information, see Using multiple + // agents for your transfer (https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html). + AgentArns []*string `min:"1" type:"list"` + + // Specifies the authentication method DataSync uses to access your Azure Blob + // Storage. DataSync can access blob storage using a shared access signature + // (SAS). + AuthenticationType *string `type:"string" enum:"AzureBlobAuthenticationType"` + + // Specifies the type of blob that you want your objects or files to be when + // transferring them into Azure Blob Storage. Currently, DataSync only supports + // moving data into Azure Blob Storage as block blobs. For more information + // on blob types, see the Azure Blob Storage documentation (https://learn.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + BlobType *string `type:"string" enum:"AzureBlobType"` + + // Specifies the ARN of the Azure Blob Storage transfer location that you're + // updating. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` + + // Specifies the SAS configuration that allows DataSync to access your Azure + // Blob Storage. + SasConfiguration *AzureBlobSasConfiguration `type:"structure"` + + // Specifies path segments if you want to limit your transfer to a virtual directory + // in your container (for example, /my/images). + Subdirectory *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationAzureBlobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationAzureBlobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLocationAzureBlobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLocationAzureBlobInput"} + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + if s.SasConfiguration != nil { + if err := s.SasConfiguration.Validate(); err != nil { + invalidParams.AddNested("SasConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *UpdateLocationAzureBlobInput) SetAccessTier(v string) *UpdateLocationAzureBlobInput { + s.AccessTier = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. 
+func (s *UpdateLocationAzureBlobInput) SetAgentArns(v []*string) *UpdateLocationAzureBlobInput { + s.AgentArns = v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *UpdateLocationAzureBlobInput) SetAuthenticationType(v string) *UpdateLocationAzureBlobInput { + s.AuthenticationType = &v + return s +} + +// SetBlobType sets the BlobType field's value. +func (s *UpdateLocationAzureBlobInput) SetBlobType(v string) *UpdateLocationAzureBlobInput { + s.BlobType = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *UpdateLocationAzureBlobInput) SetLocationArn(v string) *UpdateLocationAzureBlobInput { + s.LocationArn = &v + return s +} + +// SetSasConfiguration sets the SasConfiguration field's value. +func (s *UpdateLocationAzureBlobInput) SetSasConfiguration(v *AzureBlobSasConfiguration) *UpdateLocationAzureBlobInput { + s.SasConfiguration = v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *UpdateLocationAzureBlobInput) SetSubdirectory(v string) *UpdateLocationAzureBlobInput { + s.Subdirectory = &v + return s +} + +type UpdateLocationAzureBlobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationAzureBlobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationAzureBlobOutput) GoString() string { + return s.String() +} + type UpdateLocationHdfsInput struct { _ struct{} `type:"structure"` @@ -15443,7 +16226,8 @@ func (s UpdateLocationHdfsOutput) GoString() string { type UpdateLocationNfsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the NFS location to update. + // Specifies the Amazon Resource Name (ARN) of the NFS location that you want + // to update. // // LocationArn is a required field LocationArn *string `type:"string" required:"true"` @@ -15455,11 +16239,10 @@ type UpdateLocationNfsInput struct { // System (NFS) location. OnPremConfig *OnPremConfig `type:"structure"` - // The subdirectory in the NFS file system that is used to read data from the - // NFS source location or write data to the NFS destination. The NFS path should - // be a path that's exported by the NFS server, or a subdirectory of that path. - // The path should be such that it can be mounted by other NFS clients in your - // network. + // Specifies the subdirectory in your NFS file system that DataSync uses to + // read from or write to during a transfer. The NFS path should be exported + // by the NFS server, or a subdirectory of that path. The path should be such + // that it can be mounted by other NFS clients in your network. // // To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" // from an NFS client that has access to your server. 
You can specify any directory @@ -15476,9 +16259,6 @@ type UpdateLocationNfsInput struct { // If you are copying data to or from your Snowcone device, see NFS Server on // Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) // for more information. - // - // For information about NFS export configuration, see 18.7. The /etc/exports - // Configuration File in the Red Hat Enterprise Linux documentation. Subdirectory *string `type:"string"` } @@ -15882,7 +16662,7 @@ type UpdateStorageSystemInput struct { _ struct{} `type:"structure"` // Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects - // to and reads your on-premises storage system. + // to and reads your on-premises storage system. You can only specify one ARN. AgentArns []*string `min:"1" type:"list"` // Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging @@ -16288,6 +17068,50 @@ func Atime_Values() []string { } } +const ( + // AzureAccessTierHot is a AzureAccessTier enum value + AzureAccessTierHot = "HOT" + + // AzureAccessTierCool is a AzureAccessTier enum value + AzureAccessTierCool = "COOL" + + // AzureAccessTierArchive is a AzureAccessTier enum value + AzureAccessTierArchive = "ARCHIVE" +) + +// AzureAccessTier_Values returns all elements of the AzureAccessTier enum +func AzureAccessTier_Values() []string { + return []string{ + AzureAccessTierHot, + AzureAccessTierCool, + AzureAccessTierArchive, + } +} + +const ( + // AzureBlobAuthenticationTypeSas is a AzureBlobAuthenticationType enum value + AzureBlobAuthenticationTypeSas = "SAS" +) + +// AzureBlobAuthenticationType_Values returns all elements of the AzureBlobAuthenticationType enum +func AzureBlobAuthenticationType_Values() []string { + return []string{ + AzureBlobAuthenticationTypeSas, + } +} + +const ( + // AzureBlobTypeBlock is a AzureBlobType enum value + AzureBlobTypeBlock = "BLOCK" +) + +// AzureBlobType_Values returns all elements of the AzureBlobType enum +func AzureBlobType_Values() []string { + return []string{ + AzureBlobTypeBlock, + } +} + const ( // DiscoveryJobStatusRunning is a DiscoveryJobStatus enum value DiscoveryJobStatusRunning = "RUNNING" diff --git a/service/datasync/datasynciface/interface.go b/service/datasync/datasynciface/interface.go index 3c49d1679bb..f8f08b5232b 100644 --- a/service/datasync/datasynciface/interface.go +++ b/service/datasync/datasynciface/interface.go @@ -72,6 +72,10 @@ type DataSyncAPI interface { CreateAgentWithContext(aws.Context, *datasync.CreateAgentInput, ...request.Option) (*datasync.CreateAgentOutput, error) CreateAgentRequest(*datasync.CreateAgentInput) (*request.Request, *datasync.CreateAgentOutput) + CreateLocationAzureBlob(*datasync.CreateLocationAzureBlobInput) (*datasync.CreateLocationAzureBlobOutput, error) + CreateLocationAzureBlobWithContext(aws.Context, *datasync.CreateLocationAzureBlobInput, ...request.Option) (*datasync.CreateLocationAzureBlobOutput, error) + CreateLocationAzureBlobRequest(*datasync.CreateLocationAzureBlobInput) (*request.Request, *datasync.CreateLocationAzureBlobOutput) + CreateLocationEfs(*datasync.CreateLocationEfsInput) (*datasync.CreateLocationEfsOutput, error) CreateLocationEfsWithContext(aws.Context, *datasync.CreateLocationEfsInput, ...request.Option) (*datasync.CreateLocationEfsOutput, error) CreateLocationEfsRequest(*datasync.CreateLocationEfsInput) (*request.Request, *datasync.CreateLocationEfsOutput) @@ -136,6 +140,10 @@ type DataSyncAPI interface { 
DescribeDiscoveryJobWithContext(aws.Context, *datasync.DescribeDiscoveryJobInput, ...request.Option) (*datasync.DescribeDiscoveryJobOutput, error) DescribeDiscoveryJobRequest(*datasync.DescribeDiscoveryJobInput) (*request.Request, *datasync.DescribeDiscoveryJobOutput) + DescribeLocationAzureBlob(*datasync.DescribeLocationAzureBlobInput) (*datasync.DescribeLocationAzureBlobOutput, error) + DescribeLocationAzureBlobWithContext(aws.Context, *datasync.DescribeLocationAzureBlobInput, ...request.Option) (*datasync.DescribeLocationAzureBlobOutput, error) + DescribeLocationAzureBlobRequest(*datasync.DescribeLocationAzureBlobInput) (*request.Request, *datasync.DescribeLocationAzureBlobOutput) + DescribeLocationEfs(*datasync.DescribeLocationEfsInput) (*datasync.DescribeLocationEfsOutput, error) DescribeLocationEfsWithContext(aws.Context, *datasync.DescribeLocationEfsInput, ...request.Option) (*datasync.DescribeLocationEfsOutput, error) DescribeLocationEfsRequest(*datasync.DescribeLocationEfsInput) (*request.Request, *datasync.DescribeLocationEfsOutput) @@ -287,6 +295,10 @@ type DataSyncAPI interface { UpdateDiscoveryJobWithContext(aws.Context, *datasync.UpdateDiscoveryJobInput, ...request.Option) (*datasync.UpdateDiscoveryJobOutput, error) UpdateDiscoveryJobRequest(*datasync.UpdateDiscoveryJobInput) (*request.Request, *datasync.UpdateDiscoveryJobOutput) + UpdateLocationAzureBlob(*datasync.UpdateLocationAzureBlobInput) (*datasync.UpdateLocationAzureBlobOutput, error) + UpdateLocationAzureBlobWithContext(aws.Context, *datasync.UpdateLocationAzureBlobInput, ...request.Option) (*datasync.UpdateLocationAzureBlobOutput, error) + UpdateLocationAzureBlobRequest(*datasync.UpdateLocationAzureBlobInput) (*request.Request, *datasync.UpdateLocationAzureBlobOutput) + UpdateLocationHdfs(*datasync.UpdateLocationHdfsInput) (*datasync.UpdateLocationHdfsOutput, error) UpdateLocationHdfsWithContext(aws.Context, *datasync.UpdateLocationHdfsInput, ...request.Option) (*datasync.UpdateLocationHdfsOutput, error) UpdateLocationHdfsRequest(*datasync.UpdateLocationHdfsInput) (*request.Request, *datasync.UpdateLocationHdfsOutput) diff --git a/service/dynamodb/api.go b/service/dynamodb/api.go index 47f359cd6f4..a250c7622d6 100644 --- a/service/dynamodb/api.go +++ b/service/dynamodb/api.go @@ -5672,17 +5672,24 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // every item in a table or a secondary index. To have DynamoDB return fewer // items, you can provide a FilterExpression operation. // -// If the total number of scanned items exceeds the maximum dataset size limit -// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey -// value to continue the scan in a subsequent operation. The results also include -// the number of items exceeding the limit. A scan can result in no table data -// meeting the filter criteria. -// -// A single Scan operation reads up to the maximum number of items set (if using -// the Limit parameter) or a maximum of 1 MB of data and then apply any filtering -// to the results using FilterExpression. If LastEvaluatedKey is present in -// the response, you need to paginate the result set. For more information, -// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) +// If the total size of scanned items exceeds the maximum dataset size limit +// of 1 MB, the scan completes and results are returned to the user. 
The LastEvaluatedKey
+// value is also returned and the requestor can use the LastEvaluatedKey to
+// continue the scan in a subsequent operation. Each scan response also includes
+// the number of items that were scanned (ScannedCount) as part of the request.
+// If you use a FilterExpression, the scan can return no items that meet the
+// filter criteria, and Count will be zero. If you did not use a FilterExpression
+// in the scan request, then Count is the same as ScannedCount.
+//
+// Count and ScannedCount only return the count of items specific to a single
+// scan request and, unless the table is less than 1 MB, do not represent the
+// total number of items in the table.
+//
+// A single Scan operation first reads up to the maximum number of items set
+// (if using the Limit parameter) or a maximum of 1 MB of data and then applies
+// any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey
+// is present in the response, pagination is required to complete the full table
+// scan. For more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
 // in the Amazon DynamoDB Developer Guide.
 //
 // Scan operations proceed sequentially; however, for faster performance on
@@ -5691,11 +5698,18 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *
 // information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan)
 // in the Amazon DynamoDB Developer Guide.
 //
-// Scan uses eventually consistent reads when accessing the data in a table;
-// therefore, the result set might not include the changes to data in the table
-// immediately before the operation began. If you need a consistent copy of
-// the data, as of the time that the Scan begins, you can set the ConsistentRead
-// parameter to true.
+// By default, a Scan uses eventually consistent reads when accessing the items
+// in a table. Therefore, the results from an eventually consistent Scan may
+// not include the latest item changes at the time the scan iterates through
+// each item in the table. If you require a strongly consistent read of each
+// item as the scan iterates through the items in the table, you can set the
+// ConsistentRead parameter to true. Strong consistency only relates to the
+// consistency of the read at the item level.
+//
+// DynamoDB does not provide snapshot isolation for a scan operation when the
+// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation
+// does not guarantee that all reads in a scan see a consistent snapshot of
+// the table when the scan operation was requested.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -9018,7 +9032,8 @@ type BatchExecuteStatementOutput struct {
 	// are ordered according to the ordering of the statements.
 	ConsumedCapacity []*ConsumedCapacity `type:"list"`
 
-	// The response to each PartiQL statement in the batch.
+	// The response to each PartiQL statement in the batch. The values of the list
+	// are ordered according to the ordering of the request statements.
 	Responses []*BatchStatementResponse `type:"list"`
 }
 
@@ -24831,7 +24846,7 @@ type Update struct {
 	// Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
 	// Update condition fails.
For ReturnValuesOnConditionCheckFailure, the valid - // values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW. + // values are: NONE and ALL_OLD. ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` // Name of the table for the UpdateItem request. diff --git a/service/ec2/api.go b/service/ec2/api.go index 7180f5b5892..89615386fe8 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -128525,6 +128525,10 @@ type InferenceAcceleratorInfo struct { // Describes the Inference accelerators for the instance type. Accelerators []*InferenceDeviceInfo `locationName:"accelerators" type:"list"` + + // The total size of the memory for the inference accelerators for the instance + // type, in MiB. + TotalInferenceMemoryInMiB *int64 `locationName:"totalInferenceMemoryInMiB" type:"integer"` } // String returns the string representation. @@ -128551,6 +128555,12 @@ func (s *InferenceAcceleratorInfo) SetAccelerators(v []*InferenceDeviceInfo) *In return s } +// SetTotalInferenceMemoryInMiB sets the TotalInferenceMemoryInMiB field's value. +func (s *InferenceAcceleratorInfo) SetTotalInferenceMemoryInMiB(v int64) *InferenceAcceleratorInfo { + s.TotalInferenceMemoryInMiB = &v + return s +} + // Describes the Inference accelerators for the instance type. type InferenceDeviceInfo struct { _ struct{} `type:"structure"` @@ -128561,6 +128571,9 @@ type InferenceDeviceInfo struct { // The manufacturer of the Inference accelerator. Manufacturer *string `locationName:"manufacturer" type:"string"` + // Describes the memory available to the inference accelerator. + MemoryInfo *InferenceDeviceMemoryInfo `locationName:"memoryInfo" type:"structure"` + // The name of the Inference accelerator. Name *string `locationName:"name" type:"string"` } @@ -128595,12 +128608,50 @@ func (s *InferenceDeviceInfo) SetManufacturer(v string) *InferenceDeviceInfo { return s } +// SetMemoryInfo sets the MemoryInfo field's value. +func (s *InferenceDeviceInfo) SetMemoryInfo(v *InferenceDeviceMemoryInfo) *InferenceDeviceInfo { + s.MemoryInfo = v + return s +} + // SetName sets the Name field's value. func (s *InferenceDeviceInfo) SetName(v string) *InferenceDeviceInfo { s.Name = &v return s } +// Describes the memory available to the inference accelerator. +type InferenceDeviceMemoryInfo struct { + _ struct{} `type:"structure"` + + // The size of the memory available to the inference accelerator, in MiB. + SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InferenceDeviceMemoryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InferenceDeviceMemoryInfo) GoString() string { + return s.String() +} + +// SetSizeInMiB sets the SizeInMiB field's value. +func (s *InferenceDeviceMemoryInfo) SetSizeInMiB(v int64) *InferenceDeviceMemoryInfo { + s.SizeInMiB = &v + return s +} + // Describes an instance. 
type Instance struct { _ struct{} `type:"structure"` @@ -150493,6 +150544,9 @@ func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsReq type NetworkCardInfo struct { _ struct{} `type:"structure"` + // The baseline network performance of the network card, in Gbps. + BaselineBandwidthInGbps *float64 `locationName:"baselineBandwidthInGbps" type:"double"` + // The maximum number of network interfaces for the network card. MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` @@ -150501,6 +150555,9 @@ type NetworkCardInfo struct { // The network performance of the network card. NetworkPerformance *string `locationName:"networkPerformance" type:"string"` + + // The peak (burst) network performance of the network card, in Gbps. + PeakBandwidthInGbps *float64 `locationName:"peakBandwidthInGbps" type:"double"` } // String returns the string representation. @@ -150521,6 +150578,12 @@ func (s NetworkCardInfo) GoString() string { return s.String() } +// SetBaselineBandwidthInGbps sets the BaselineBandwidthInGbps field's value. +func (s *NetworkCardInfo) SetBaselineBandwidthInGbps(v float64) *NetworkCardInfo { + s.BaselineBandwidthInGbps = &v + return s +} + // SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. func (s *NetworkCardInfo) SetMaximumNetworkInterfaces(v int64) *NetworkCardInfo { s.MaximumNetworkInterfaces = &v @@ -150539,6 +150602,12 @@ func (s *NetworkCardInfo) SetNetworkPerformance(v string) *NetworkCardInfo { return s } +// SetPeakBandwidthInGbps sets the PeakBandwidthInGbps field's value. +func (s *NetworkCardInfo) SetPeakBandwidthInGbps(v float64) *NetworkCardInfo { + s.PeakBandwidthInGbps = &v + return s +} + // Describes the networking features of the instance type. type NetworkInfo struct { _ struct{} `type:"structure"` diff --git a/service/emrserverless/api.go b/service/emrserverless/api.go index 48a371be077..d8ddf3e5e43 100644 --- a/service/emrserverless/api.go +++ b/service/emrserverless/api.go @@ -406,8 +406,16 @@ func (c *EMRServerless) GetDashboardForJobRunRequest(input *GetDashboardForJobRu // GetDashboardForJobRun API operation for EMR Serverless. // -// Returns a URL to access the job run dashboard. The generated URL is valid -// for one hour, after which you must invoke the API again to generate a new +// Creates and returns a URL that you can use to access the application UIs +// for a job run. +// +// For jobs in a running state, the application UI is a live user interface +// such as the Spark or Tez web UI. For completed jobs, the application UI is +// a persistent application user interface such as the Spark History Server +// or persistent Tez UI. +// +// The URL is valid for one hour after you generate it. To access the application +// UI after that hour elapses, you must invoke the API again to generate a new // URL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1968,6 +1976,111 @@ func (s *CancelJobRunOutput) SetJobRunId(v string) *CancelJobRunOutput { return s } +// The Amazon CloudWatch configuration for monitoring logs. You can configure +// your jobs to send log information to CloudWatch. +type CloudWatchLoggingConfiguration struct { + _ struct{} `type:"structure"` + + // Enables CloudWatch logging. + // + // Enabled is a required field + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` + + // The Key Management Service (KMS) key ARN to encrypt the logs that you store + // in CloudWatch Logs. 
+ EncryptionKeyArn *string `locationName:"encryptionKeyArn" min:"20" type:"string"` + + // The name of the log group in Amazon CloudWatch Logs where you want to publish + // your logs. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Prefix for the CloudWatch log stream name. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The types of logs that you want to publish to CloudWatch. If you don't specify + // any log types, driver STDOUT and STDERR logs will be published to CloudWatch + // Logs by default. For more information including the supported worker types + // for Hive and Spark, see Logging for EMR Serverless with CloudWatch (https://docs.aws.amazon.com/emr/latest/EMR-Serverless-UserGuide/logging.html#jobs-log-storage-cw). + // + // * Key Valid Values: SPARK_DRIVER, SPARK_EXECUTOR, HIVE_DRIVER, TEZ_TASK + // + // * Array Members Valid Values: STDOUT, STDERR, HIVE_LOG, TEZ_AM, SYSTEM_LOGS + LogTypes map[string][]*string `locationName:"logTypes" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudWatchLoggingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudWatchLoggingConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchLoggingConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudWatchLoggingConfiguration"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.EncryptionKeyArn != nil && len(*s.EncryptionKeyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("EncryptionKeyArn", 20)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.LogTypes != nil && len(s.LogTypes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogTypes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *CloudWatchLoggingConfiguration) SetEnabled(v bool) *CloudWatchLoggingConfiguration { + s.Enabled = &v + return s +} + +// SetEncryptionKeyArn sets the EncryptionKeyArn field's value. +func (s *CloudWatchLoggingConfiguration) SetEncryptionKeyArn(v string) *CloudWatchLoggingConfiguration { + s.EncryptionKeyArn = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CloudWatchLoggingConfiguration) SetLogGroupName(v string) *CloudWatchLoggingConfiguration { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. +func (s *CloudWatchLoggingConfiguration) SetLogStreamNamePrefix(v string) *CloudWatchLoggingConfiguration { + s.LogStreamNamePrefix = &v + return s +} + +// SetLogTypes sets the LogTypes field's value. 
+func (s *CloudWatchLoggingConfiguration) SetLogTypes(v map[string][]*string) *CloudWatchLoggingConfiguration { + s.LogTypes = v + return s +} + // A configuration specification to be used when provisioning an application. // A configuration consists of a classification, properties, and optional nested // configurations. A classification refers to an application-specific configuration @@ -4026,6 +4139,10 @@ func (s *MaximumAllowedResources) SetMemory(v string) *MaximumAllowedResources { type MonitoringConfiguration struct { _ struct{} `type:"structure"` + // The Amazon CloudWatch configuration for monitoring logs. You can configure + // your jobs to send log information to CloudWatch. + CloudWatchLoggingConfiguration *CloudWatchLoggingConfiguration `locationName:"cloudWatchLoggingConfiguration" type:"structure"` + // The managed log persistence configuration for a job run. ManagedPersistenceMonitoringConfiguration *ManagedPersistenceMonitoringConfiguration `locationName:"managedPersistenceMonitoringConfiguration" type:"structure"` @@ -4054,6 +4171,11 @@ func (s MonitoringConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *MonitoringConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "MonitoringConfiguration"} + if s.CloudWatchLoggingConfiguration != nil { + if err := s.CloudWatchLoggingConfiguration.Validate(); err != nil { + invalidParams.AddNested("CloudWatchLoggingConfiguration", err.(request.ErrInvalidParams)) + } + } if s.ManagedPersistenceMonitoringConfiguration != nil { if err := s.ManagedPersistenceMonitoringConfiguration.Validate(); err != nil { invalidParams.AddNested("ManagedPersistenceMonitoringConfiguration", err.(request.ErrInvalidParams)) @@ -4071,6 +4193,12 @@ func (s *MonitoringConfiguration) Validate() error { return nil } +// SetCloudWatchLoggingConfiguration sets the CloudWatchLoggingConfiguration field's value. +func (s *MonitoringConfiguration) SetCloudWatchLoggingConfiguration(v *CloudWatchLoggingConfiguration) *MonitoringConfiguration { + s.CloudWatchLoggingConfiguration = v + return s +} + // SetManagedPersistenceMonitoringConfiguration sets the ManagedPersistenceMonitoringConfiguration field's value. func (s *MonitoringConfiguration) SetManagedPersistenceMonitoringConfiguration(v *ManagedPersistenceMonitoringConfiguration) *MonitoringConfiguration { s.ManagedPersistenceMonitoringConfiguration = v diff --git a/service/lambda/api.go b/service/lambda/api.go index d31909be887..3aef4434a91 100644 --- a/service/lambda/api.go +++ b/service/lambda/api.go @@ -22975,6 +22975,9 @@ const ( // RuntimeRuby32 is a Runtime enum value RuntimeRuby32 = "ruby3.2" + + // RuntimePython311 is a Runtime enum value + RuntimePython311 = "python3.11" ) // Runtime_Values returns all elements of the Runtime enum @@ -23011,6 +23014,7 @@ func Runtime_Values() []string { RuntimePython310, RuntimeJava17, RuntimeRuby32, + RuntimePython311, } } diff --git a/service/rds/api.go b/service/rds/api.go index a935f88fdb5..38933b5efc4 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -3015,10 +3015,10 @@ func (c *RDS) CreateGlobalClusterRequest(input *CreateGlobalClusterInput) (req * // primary cluster through high-speed replication performed by the Aurora storage // subsystem. // -// You can create a global database that is initially empty, and then add a -// primary cluster and a secondary cluster to it. 
Or you can specify an existing -// Aurora cluster during the create operation, and this cluster becomes the -// primary cluster of the global database. +// You can create a global database that is initially empty, and then create +// the primary and secondary DB clusters in the global database. Or you can +// specify an existing Aurora cluster during the create operation, and this +// cluster becomes the primary cluster of the global database. // // This operation applies only to Aurora DB clusters. // @@ -12104,13 +12104,13 @@ func (c *RDS) ModifyGlobalClusterRequest(input *ModifyGlobalClusterInput) (req * // ModifyGlobalCluster API operation for Amazon Relational Database Service. // -// Modify a setting for an Amazon Aurora global cluster. You can change one +// Modifies a setting for an Amazon Aurora global cluster. You can change one // or more database configuration parameters by specifying these parameters // and the new values in the request. For more information on Amazon Aurora, // see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // -// This action only applies to Aurora DB clusters. +// This operation only applies to Aurora global database clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -26920,6 +26920,9 @@ type DBInstance struct { // by subelements. PendingModifiedValues *PendingModifiedValues `type:"structure"` + // The progress of the storage optimization operation as a percentage. + PercentProgress *string `type:"string"` + // Indicates whether Performance Insights is enabled for the DB instance. PerformanceInsightsEnabled *bool `type:"boolean"` @@ -27409,6 +27412,12 @@ func (s *DBInstance) SetPendingModifiedValues(v *PendingModifiedValues) *DBInsta return s } +// SetPercentProgress sets the PercentProgress field's value. +func (s *DBInstance) SetPercentProgress(v string) *DBInstance { + s.PercentProgress = &v + return s +} + // SetPerformanceInsightsEnabled sets the PerformanceInsightsEnabled field's value. func (s *DBInstance) SetPerformanceInsightsEnabled(v bool) *DBInstance { s.PerformanceInsightsEnabled = &v @@ -41423,7 +41432,7 @@ type ModifyDBInstanceInput struct { // * Can't be set to 0 for an RDS Custom for Oracle DB instance. BackupRetentionPeriod *int64 `type:"integer"` - // The CA certificate identifier to use for the DB instance6's server certificate. + // The CA certificate identifier to use for the DB instance's server certificate. // // This setting doesn't apply to RDS Custom DB instances. // @@ -43565,25 +43574,23 @@ func (s *ModifyEventSubscriptionOutput) SetEventSubscription(v *EventSubscriptio type ModifyGlobalClusterInput struct { _ struct{} `type:"structure"` - // A value that indicates whether major version upgrades are allowed. + // Specifies whether to allow major version upgrades. // - // Constraints: You must allow major version upgrades when specifying a value - // for the EngineVersion parameter that is a different major version than the - // DB cluster's current version. + // Constraints: Must be enabled if you specify a value for the EngineVersion + // parameter that's a different major version than the global cluster's current + // version. // // If you upgrade the major version of a global database, the cluster and DB // instance parameter groups are set to the default parameter groups for the // new version. 
Apply any custom parameter groups after completing the upgrade. AllowMajorVersionUpgrade *bool `type:"boolean"` - // Indicates if the global database cluster has deletion protection enabled. + // Specifies whether to enable deletion protection for the global database cluster. // The global database cluster can't be deleted when deletion protection is // enabled. DeletionProtection *bool `type:"boolean"` - // The version number of the database engine to which you want to upgrade. Changing - // this parameter results in an outage. The change is applied during the next - // maintenance window unless ApplyImmediately is enabled. + // The version number of the database engine to which you want to upgrade. // // To list all of the available engine versions for aurora-mysql (for MySQL-based // Aurora global databases), use the following command: @@ -43598,24 +43605,24 @@ type ModifyGlobalClusterInput struct { // == `true`].[EngineVersion]' EngineVersion *string `type:"string"` - // The DB cluster identifier for the global cluster being modified. This parameter - // isn't case-sensitive. + // The cluster identifier for the global cluster to modify. This parameter isn't + // case-sensitive. // // Constraints: // // * Must match the identifier of an existing global database cluster. GlobalClusterIdentifier *string `type:"string"` - // The new cluster identifier for the global database cluster when modifying - // a global database cluster. This value is stored as a lowercase string. + // The new cluster identifier for the global database cluster. This value is + // stored as a lowercase string. // // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens + // * Must contain from 1 to 63 letters, numbers, or hyphens. // - // * The first character must be a letter + // * The first character must be a letter. // - // * Can't end with a hyphen or contain two consecutive hyphens + // * Can't end with a hyphen or contain two consecutive hyphens. // // Example: my-cluster2 NewGlobalClusterIdentifier *string `type:"string"` diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index 567f8fcf304..0ab43328c0c 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -107405,12 +107405,16 @@ type TextClassificationJobConfig struct { CompletionCriteria *AutoMLJobCompletionCriteria `type:"structure"` // The name of the column used to provide the sentences to be classified. It - // should not be the same as the target column (Required). - ContentColumn *string `min:"1" type:"string"` + // should not be the same as the target column. + // + // ContentColumn is a required field + ContentColumn *string `min:"1" type:"string" required:"true"` // The name of the column used to provide the class labels. It should not be - // same as the content column (Required). - TargetLabelColumn *string `min:"1" type:"string"` + // same as the content column. + // + // TargetLabelColumn is a required field + TargetLabelColumn *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -107434,9 +107438,15 @@ func (s TextClassificationJobConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
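// Usage sketch (illustrative, not generated service code): constructing a
// TextClassificationJobConfig now that ContentColumn and TargetLabelColumn are
// required for CreateAutoMLJobV2. Assumes the import "github.com/aws/aws-sdk-go/aws";
// the column names are placeholders for columns in your training dataset.
func textClassificationJobConfig() (*sagemaker.TextClassificationJobConfig, error) {
	cfg := &sagemaker.TextClassificationJobConfig{
		ContentColumn:     aws.String("review_text"), // placeholder: column holding the sentences
		TargetLabelColumn: aws.String("sentiment"),   // placeholder: column holding the class labels
	}
	// Validate now reports an error if either required column is omitted.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}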
func (s *TextClassificationJobConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TextClassificationJobConfig"} + if s.ContentColumn == nil { + invalidParams.Add(request.NewErrParamRequired("ContentColumn")) + } if s.ContentColumn != nil && len(*s.ContentColumn) < 1 { invalidParams.Add(request.NewErrParamMinLen("ContentColumn", 1)) } + if s.TargetLabelColumn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetLabelColumn")) + } if s.TargetLabelColumn != nil && len(*s.TargetLabelColumn) < 1 { invalidParams.Add(request.NewErrParamMinLen("TargetLabelColumn", 1)) } diff --git a/service/securityhub/api.go b/service/securityhub/api.go index 88a2fcfc87e..32a254ec861 100644 --- a/service/securityhub/api.go +++ b/service/securityhub/api.go @@ -8377,10 +8377,10 @@ type AutomationRulesConfig struct { // Specifies whether a rule is the last to be applied with respect to a finding // that matches the rule criteria. This is useful when a finding matches the - // criteria for multiple rules, and each rule has different actions. If the - // value of this field is set to true for a rule, Security Hub applies the rule - // action to a finding that matches the rule criteria and doesn't evaluate other - // rules for the finding. The default value of this field is false. + // criteria for multiple rules, and each rule has different actions. If a rule + // is terminal, Security Hub applies the rule action to a finding that matches + // the rule criteria and doesn't evaluate other rules for the finding. By default, + // a rule isn't terminal. IsTerminal *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a rule. @@ -9031,10 +9031,10 @@ type AutomationRulesMetadata struct { // Specifies whether a rule is the last to be applied with respect to a finding // that matches the rule criteria. This is useful when a finding matches the - // criteria for multiple rules, and each rule has different actions. If the - // value of this field is set to true for a rule, Security Hub applies the rule - // action to a finding that matches the rule criteria and doesn't evaluate other - // rules for the finding. The default value of this field is false. + // criteria for multiple rules, and each rule has different actions. If a rule + // is terminal, Security Hub applies the rule action to a finding that matches + // the rule criteria and doesn't evaluate other rules for the finding. By default, + // a rule isn't terminal. IsTerminal *bool `type:"boolean"` // The Amazon Resource Name (ARN) for the rule. @@ -11284,6 +11284,191 @@ func (s *AwsAppSyncGraphQlApiUserPoolConfigDetails) SetUserPoolId(v string) *Aws return s } +// The configuration of the workgroup, which includes the location in Amazon +// Simple Storage Service (Amazon S3) where query results are stored, the encryption +// option, if any, used for query results, whether Amazon CloudWatch metrics +// are enabled for the workgroup, and the limit for the amount of bytes scanned +// (cutoff) per query, if it is specified. +type AwsAthenaWorkGroupConfigurationDetails struct { + _ struct{} `type:"structure"` + + // The location in Amazon S3 where query and calculation results are stored + // and the encryption option, if any, used for query and calculation results. + // These are known as client-side settings. If workgroup settings override client-side + // settings, then the query uses the workgroup settings. 
+ ResultConfiguration *AwsAthenaWorkGroupConfigurationResultConfigurationDetails `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupConfigurationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupConfigurationDetails) GoString() string { + return s.String() +} + +// SetResultConfiguration sets the ResultConfiguration field's value. +func (s *AwsAthenaWorkGroupConfigurationDetails) SetResultConfiguration(v *AwsAthenaWorkGroupConfigurationResultConfigurationDetails) *AwsAthenaWorkGroupConfigurationDetails { + s.ResultConfiguration = v + return s +} + +// The location in Amazon Simple Storage Service (Amazon S3) where query and +// calculation results are stored and the encryption option, if any, used for +// query and calculation results. These are known as client-side settings. If +// workgroup settings override client-side settings, then the query uses the +// workgroup settings. +type AwsAthenaWorkGroupConfigurationResultConfigurationDetails struct { + _ struct{} `type:"structure"` + + // Specifies the method used to encrypt the user’s data stores in the Athena + // workgroup. + EncryptionConfiguration *AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupConfigurationResultConfigurationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupConfigurationResultConfigurationDetails) GoString() string { + return s.String() +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *AwsAthenaWorkGroupConfigurationResultConfigurationDetails) SetEncryptionConfiguration(v *AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails) *AwsAthenaWorkGroupConfigurationResultConfigurationDetails { + s.EncryptionConfiguration = v + return s +} + +// Specifies the method used to encrypt the user’s data stores in the Athena +// workgroup. +type AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails struct { + _ struct{} `type:"structure"` + + // Indicates whether Amazon Simple Storage Service (Amazon S3) server-side encryption + // with Amazon S3 managed keys (SSE_S3), server-side encryption with KMS keys + // (SSE_KMS), or client-side encryption with KMS customer managed keys (CSE_KMS) + // is used. + EncryptionOption *string `type:"string"` + + // For SSE_KMS and CSE_KMS, this is the KMS key Amazon Resource Name (ARN) or + // ID. 
+ KmsKey *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails) GoString() string { + return s.String() +} + +// SetEncryptionOption sets the EncryptionOption field's value. +func (s *AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails) SetEncryptionOption(v string) *AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails { + s.EncryptionOption = &v + return s +} + +// SetKmsKey sets the KmsKey field's value. +func (s *AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails) SetKmsKey(v string) *AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails { + s.KmsKey = &v + return s +} + +// Provides information about an Amazon Athena workgroup. +type AwsAthenaWorkGroupDetails struct { + _ struct{} `type:"structure"` + + // The configuration of the workgroup, which includes the location in Amazon + // Simple Storage Service (Amazon S3) where query results are stored, the encryption + // option, if any, used for query results, whether Amazon CloudWatch metrics + // are enabled for the workgroup, and the limit for the amount of bytes scanned + // (cutoff) per query, if it is specified. + Configuration *AwsAthenaWorkGroupConfigurationDetails `type:"structure"` + + // The workgroup description. + Description *string `type:"string"` + + // The workgroup name. + Name *string `type:"string"` + + // Whether the workgroup is enabled or disabled. + State *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsAthenaWorkGroupDetails) GoString() string { + return s.String() +} + +// SetConfiguration sets the Configuration field's value. +func (s *AwsAthenaWorkGroupDetails) SetConfiguration(v *AwsAthenaWorkGroupConfigurationDetails) *AwsAthenaWorkGroupDetails { + s.Configuration = v + return s +} + +// SetDescription sets the Description field's value. +func (s *AwsAthenaWorkGroupDetails) SetDescription(v string) *AwsAthenaWorkGroupDetails { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *AwsAthenaWorkGroupDetails) SetName(v string) *AwsAthenaWorkGroupDetails { + s.Name = &v + return s +} + +// SetState sets the State field's value. 
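// Usage sketch (illustrative, not generated service code): populating the new
// Athena workgroup resource details, including the result-encryption settings,
// with the setters defined above. Only the securityhub package is needed; the
// workgroup name and KMS key ARN are placeholders.
func athenaWorkGroupDetails() *securityhub.AwsAthenaWorkGroupDetails {
	enc := &securityhub.AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails{}
	enc.SetEncryptionOption("SSE_KMS")
	enc.SetKmsKey("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID") // placeholder ARN

	resultCfg := &securityhub.AwsAthenaWorkGroupConfigurationResultConfigurationDetails{}
	resultCfg.SetEncryptionConfiguration(enc)

	cfg := &securityhub.AwsAthenaWorkGroupConfigurationDetails{}
	cfg.SetResultConfiguration(resultCfg)

	wg := &securityhub.AwsAthenaWorkGroupDetails{}
	wg.SetName("primary") // placeholder workgroup name
	wg.SetState("ENABLED")
	wg.SetConfiguration(cfg)
	// The workgroup can then be attached to a finding's ResourceDetails through the
	// AwsAthenaWorkGroup field added later in this change.
	return wg
}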
+func (s *AwsAthenaWorkGroupDetails) SetState(v string) *AwsAthenaWorkGroupDetails { + s.State = &v + return s +} + // An Availability Zone for the automatic scaling group. type AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails struct { _ struct{} `type:"structure"` @@ -32618,6 +32803,55 @@ func (s *AwsRdsDbClusterOptionGroupMembership) SetStatus(v string) *AwsRdsDbClus return s } +// Contains the name and values of a manual Amazon Relational Database Service +// (RDS) DB cluster snapshot attribute. +type AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute struct { + _ struct{} `type:"structure"` + + // The name of the manual DB cluster snapshot attribute. The attribute named + // restore refers to the list of Amazon Web Services accounts that have permission + // to copy or restore the manual DB cluster snapshot. + AttributeName *string `type:"string"` + + // The value(s) for the manual DB cluster snapshot attribute. If the AttributeName + // field is set to restore, then this element returns a list of IDs of the Amazon + // Web Services accounts that are authorized to copy or restore the manual DB + // cluster snapshot. If a value of all is in the list, then the manual DB cluster + // snapshot is public and available for any Amazon Web Services account to copy + // or restore. + AttributeValues []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute) SetAttributeName(v string) *AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute { + s.AttributeName = &v + return s +} + +// SetAttributeValues sets the AttributeValues field's value. +func (s *AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute) SetAttributeValues(v []*string) *AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute { + s.AttributeValues = v + return s +} + // Information about an Amazon RDS DB cluster snapshot. type AwsRdsDbClusterSnapshotDetails struct { _ struct{} `type:"structure"` @@ -32640,6 +32874,9 @@ type AwsRdsDbClusterSnapshotDetails struct { // The DB cluster identifier. DbClusterIdentifier *string `type:"string"` + // Contains the name and values of a manual DB cluster snapshot attribute. + DbClusterSnapshotAttributes []*AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute `type:"list"` + // The identifier of the DB cluster snapshot. DbClusterSnapshotIdentifier *string `type:"string"` @@ -32731,6 +32968,12 @@ func (s *AwsRdsDbClusterSnapshotDetails) SetDbClusterIdentifier(v string) *AwsRd return s } +// SetDbClusterSnapshotAttributes sets the DbClusterSnapshotAttributes field's value. 
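// Usage sketch (illustrative, not generated service code): checking whether a DB
// cluster snapshot described in a finding is shared publicly, based on the restore
// attribute semantics documented above. Assumes the import
// "github.com/aws/aws-sdk-go/aws".
func clusterSnapshotIsPublic(d *securityhub.AwsRdsDbClusterSnapshotDetails) bool {
	for _, attr := range d.DbClusterSnapshotAttributes {
		if aws.StringValue(attr.AttributeName) != "restore" {
			continue
		}
		for _, v := range attr.AttributeValues {
			if aws.StringValue(v) == "all" {
				// "all" means any Amazon Web Services account can copy or restore the snapshot.
				return true
			}
		}
	}
	return false
}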
+func (s *AwsRdsDbClusterSnapshotDetails) SetDbClusterSnapshotAttributes(v []*AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute) *AwsRdsDbClusterSnapshotDetails { + s.DbClusterSnapshotAttributes = v + return s +} + // SetDbClusterSnapshotIdentifier sets the DbClusterSnapshotIdentifier field's value. func (s *AwsRdsDbClusterSnapshotDetails) SetDbClusterSnapshotIdentifier(v string) *AwsRdsDbClusterSnapshotDetails { s.DbClusterSnapshotIdentifier = &v @@ -44542,10 +44785,10 @@ type CreateAutomationRuleInput struct { // Specifies whether a rule is the last to be applied with respect to a finding // that matches the rule criteria. This is useful when a finding matches the - // criteria for multiple rules, and each rule has different actions. If the - // value of this field is set to true for a rule, Security Hub applies the rule - // action to a finding that matches the rule criteria and doesn't evaluate other - // rules for the finding. The default value of this field is false. + // criteria for multiple rules, and each rule has different actions. If a rule + // is terminal, Security Hub applies the rule action to a finding that matches + // the rule criteria and doesn't evaluate other rules for the finding. By default, + // a rule isn't terminal. IsTerminal *bool `type:"boolean"` // The name of the rule. @@ -50604,30 +50847,61 @@ func (s *Malware) SetType(v string) *Malware { return s } -// A map filter for querying findings. Each map filter provides the field to -// check, the value to look for, and the comparison operator. +// A map filter for filtering Security Hub findings. Each map filter provides +// the field to check for, the value to check for, and the comparison operator. type MapFilter struct { _ struct{} `type:"structure"` - // The condition to apply to the key value when querying for findings with a - // map filter. + // The condition to apply to the key value when filtering Security Hub findings + // with a map filter. + // + // To search for values that have the filter value, use one of the following + // comparison operators: // - // To search for values that exactly match the filter value, use EQUALS. For - // example, for the ResourceTags field, the filter Department EQUALS Security - // matches findings that have the value Security for the tag Department. + // * To search for values that include the filter value, use CONTAINS. For + // example, for the ResourceTags field, the filter Department CONTAINS Security + // matches findings that include the value Security for the Department tag. + // In the same example, a finding with a value of Security team for the Department + // tag is a match. // - // To search for values other than the filter value, use NOT_EQUALS. For example, - // for the ResourceTags field, the filter Department NOT_EQUALS Finance matches - // findings that do not have the value Finance for the tag Department. + // * To search for values that exactly match the filter value, use EQUALS. + // For example, for the ResourceTags field, the filter Department EQUALS + // Security matches findings that have the value Security for the Department + // tag. + // + // CONTAINS and EQUALS filters on the same field are joined by OR. A finding + // matches if it matches any one of those filters. For example, the filters + // Department CONTAINS Security OR Department CONTAINS Finance match a finding + // that includes either Security, Finance, or both values. 
+ // + // To search for values that don't have the filter value, use one of the following + // comparison operators: + // + // * To search for values that exclude the filter value, use NOT_CONTAINS. + // For example, for the ResourceTags field, the filter Department NOT_CONTAINS + // Finance matches findings that exclude the value Finance for the Department + // tag. + // + // * To search for values other than the filter value, use NOT_EQUALS. For + // example, for the ResourceTags field, the filter Department NOT_EQUALS + // Finance matches findings that don’t have the value Finance for the Department + // tag. // - // EQUALS filters on the same field are joined by OR. A finding matches if it - // matches any one of those filters. + // NOT_CONTAINS and NOT_EQUALS filters on the same field are joined by AND. + // A finding matches only if it matches all of those filters. For example, the + // filters Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance + // match a finding that excludes both the Security and Finance values. // - // NOT_EQUALS filters on the same field are joined by AND. A finding matches - // only if it matches all of those filters. + // CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS + // filters can only be used with other NOT_CONTAINS filters. // - // You cannot have both an EQUALS filter and a NOT_EQUALS filter on the same - // field. + // You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the + // same field. Similarly, you can’t have both an EQUALS filter and a NOT_EQUALS + // filter on the same field. Combining filters in this way returns an error. + // + // CONTAINS and NOT_CONTAINS operators can be used only with automation rules. + // For more information, see Automation rules (https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) + // in the Security Hub User Guide. Comparison *string `type:"string" enum:"MapFilterComparison"` // The key of the map filter. For example, for ResourceTags, Key identifies @@ -50636,7 +50910,7 @@ type MapFilter struct { // The value for the key in the map filter. Filter values are case sensitive. // For example, one of the values for a tag called Department might be Security. - // If you provide security as the filter value, then there is no match. + // If you provide security as the filter value, then there's no match. Value *string `type:"string"` } @@ -52514,6 +52788,11 @@ type ResourceDetails struct { // databases, microservices, and APIs from a single GraphQL endpoint. AwsAppSyncGraphQlApi *AwsAppSyncGraphQlApiDetails `type:"structure"` + // Provides information about an Amazon Athena workgroup. A workgroup helps + // you separate users, teams, applications, or workloads. It also helps you + // set limits on data processing and track costs. + AwsAthenaWorkGroup *AwsAthenaWorkGroupDetails `type:"structure"` + // Details for an autoscaling group. AwsAutoScalingAutoScalingGroup *AwsAutoScalingAutoScalingGroupDetails `type:"structure"` @@ -52861,6 +53140,12 @@ func (s *ResourceDetails) SetAwsAppSyncGraphQlApi(v *AwsAppSyncGraphQlApiDetails return s } +// SetAwsAthenaWorkGroup sets the AwsAthenaWorkGroup field's value. +func (s *ResourceDetails) SetAwsAthenaWorkGroup(v *AwsAthenaWorkGroupDetails) *ResourceDetails { + s.AwsAthenaWorkGroup = v + return s +} + // SetAwsAutoScalingAutoScalingGroup sets the AwsAutoScalingAutoScalingGroup field's value. 
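// Usage sketch (illustrative, not generated service code): a map filter that uses
// the new CONTAINS comparison operator. As noted in the MapFilter documentation
// above, CONTAINS and NOT_CONTAINS are valid only in automation rule criteria.
// The tag key and value are placeholders.
func departmentTagFilter() *securityhub.MapFilter {
	f := &securityhub.MapFilter{}
	f.SetKey("Department") // placeholder tag key
	f.SetValue("Security") // placeholder tag value
	f.SetComparison(securityhub.MapFilterComparisonContains)
	return f
}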
func (s *ResourceDetails) SetAwsAutoScalingAutoScalingGroup(v *AwsAutoScalingAutoScalingGroupDetails) *ResourceDetails { s.AwsAutoScalingAutoScalingGroup = v @@ -56154,52 +56439,66 @@ func (s *StatusReason) SetReasonCode(v string) *StatusReason { return s } -// A string filter for querying findings. +// A string filter for filtering Security Hub findings. type StringFilter struct { _ struct{} `type:"structure"` - // The condition to apply to a string value when querying for findings. To search - // for values that contain the filter criteria value, use one of the following + // The condition to apply to a string value when filtering Security Hub findings. + // + // To search for values that have the filter value, use one of the following // comparison operators: // + // * To search for values that include the filter value, use CONTAINS. For + // example, the filter Title CONTAINS CloudFront matches findings that have + // a Title that includes the string CloudFront. + // // * To search for values that exactly match the filter value, use EQUALS. - // For example, the filter ResourceType EQUALS AwsEc2SecurityGroup only matches - // findings that have a resource type of AwsEc2SecurityGroup. + // For example, the filter AwsAccountId EQUALS 123456789012 only matches + // findings that have an account ID of 123456789012. // // * To search for values that start with the filter value, use PREFIX. For - // example, the filter ResourceType PREFIX AwsIam matches findings that have - // a resource type that starts with AwsIam. Findings with a resource type - // of AwsIamPolicy, AwsIamRole, or AwsIamUser would all match. + // example, the filter ResourceRegion PREFIX us matches findings that have + // a ResourceRegion that starts with us. A ResourceRegion that starts with + // a different value, such as af, ap, or ca, doesn't match. // - // EQUALS and PREFIX filters on the same field are joined by OR. A finding matches - // if it matches any one of those filters. + // CONTAINS, EQUALS, and PREFIX filters on the same field are joined by OR. + // A finding matches if it matches any one of those filters. For example, the + // filters Title CONTAINS CloudFront OR Title CONTAINS CloudWatch match a finding + // that includes either CloudFront, CloudWatch, or both strings in the title. // - // To search for values that do not contain the filter criteria value, use one - // of the following comparison operators: + // To search for values that don’t have the filter value, use one of the following + // comparison operators: + // + // * To search for values that exclude the filter value, use NOT_CONTAINS. + // For example, the filter Title NOT_CONTAINS CloudFront matches findings + // that have a Title that excludes the string CloudFront. // - // * To search for values that do not exactly match the filter value, use - // NOT_EQUALS. For example, the filter ResourceType NOT_EQUALS AwsIamPolicy - // matches findings that have a resource type other than AwsIamPolicy. + // * To search for values other than the filter value, use NOT_EQUALS. For + // example, the filter AwsAccountId NOT_EQUALS 123456789012 only matches + // findings that have an account ID other than 123456789012. // - // * To search for values that do not start with the filter value, use PREFIX_NOT_EQUALS. - // For example, the filter ResourceType PREFIX_NOT_EQUALS AwsIam matches - // findings that have a resource type that does not start with AwsIam. 
Findings - // with a resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would - // all be excluded from the results. + // * To search for values that don't start with the filter value, use PREFIX_NOT_EQUALS. + // For example, the filter ResourceRegion PREFIX_NOT_EQUALS us matches findings + // with a ResourceRegion that starts with a value other than us. // - // NOT_EQUALS and PREFIX_NOT_EQUALS filters on the same field are joined by - // AND. A finding matches only if it matches all of those filters. + // NOT_CONTAINS, NOT_EQUALS, and PREFIX_NOT_EQUALS filters on the same field + // are joined by AND. A finding matches only if it matches all of those filters. + // For example, the filters Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS + // CloudWatch match a finding that excludes both CloudFront and CloudWatch in + // the title. // - // For filters on the same field, you cannot provide both an EQUALS filter and - // a NOT_EQUALS or PREFIX_NOT_EQUALS filter. Combining filters in this way always - // returns an error, even if the provided filter values would return valid results. + // You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the + // same field. Similarly, you can't provide both an EQUALS filter and a NOT_EQUALS + // or PREFIX_NOT_EQUALS filter on the same field. Combining filters in this + // way returns an error. CONTAINS filters can only be used with other CONTAINS + // filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters. // // You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters - // for the same field. Security Hub first processes the PREFIX filters, then - // the NOT_EQUALS or PREFIX_NOT_EQUALS filters. + // for the same field. Security Hub first processes the PREFIX filters, and + // then the NOT_EQUALS or PREFIX_NOT_EQUALS filters. // - // For example, for the following filter, Security Hub first identifies findings - // that have resource types that start with either AwsIAM or AwsEc2. It then + // For example, for the following filters, Security Hub first identifies findings + // that have resource types that start with either AwsIam or AwsEc2. It then // excludes findings that have a resource type of AwsIamPolicy and findings // that have a resource type of AwsEc2NetworkInterface. // @@ -56210,11 +56509,15 @@ type StringFilter struct { // * ResourceType NOT_EQUALS AwsIamPolicy // // * ResourceType NOT_EQUALS AwsEc2NetworkInterface + // + // CONTAINS and NOT_CONTAINS operators can be used only with automation rules. + // For more information, see Automation rules (https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) + // in the Security Hub User Guide. Comparison *string `type:"string" enum:"StringFilterComparison"` // The string filter value. Filter values are case sensitive. For example, the // product name for control-based findings is Security Hub. If you provide security - // hub as the filter text, then there is no match. + // hub as the filter value, there's no match. Value *string `type:"string"` } @@ -56908,10 +57211,10 @@ type UpdateAutomationRulesRequestItem struct { // Specifies whether a rule is the last to be applied with respect to a finding // that matches the rule criteria. This is useful when a finding matches the - // criteria for multiple rules, and each rule has different actions. 
If the - // value of this field is set to true for a rule, Security Hub applies the rule - // action to a finding that matches the rule criteria and doesn't evaluate other - // rules for the finding. The default value of this field is false. + // criteria for multiple rules, and each rule has different actions. If a rule + // is terminal, Security Hub applies the rule action to a finding that matches + // the rule criteria and doesn't evaluate other rules for the finding. By default, + // a rule isn't terminal. IsTerminal *bool `type:"boolean"` // The Amazon Resource Name (ARN) for the rule. @@ -58500,6 +58803,12 @@ const ( // MapFilterComparisonNotEquals is a MapFilterComparison enum value MapFilterComparisonNotEquals = "NOT_EQUALS" + + // MapFilterComparisonContains is a MapFilterComparison enum value + MapFilterComparisonContains = "CONTAINS" + + // MapFilterComparisonNotContains is a MapFilterComparison enum value + MapFilterComparisonNotContains = "NOT_CONTAINS" ) // MapFilterComparison_Values returns all elements of the MapFilterComparison enum @@ -58507,6 +58816,8 @@ func MapFilterComparison_Values() []string { return []string{ MapFilterComparisonEquals, MapFilterComparisonNotEquals, + MapFilterComparisonContains, + MapFilterComparisonNotContains, } } @@ -58718,6 +59029,12 @@ const ( // StringFilterComparisonPrefixNotEquals is a StringFilterComparison enum value StringFilterComparisonPrefixNotEquals = "PREFIX_NOT_EQUALS" + + // StringFilterComparisonContains is a StringFilterComparison enum value + StringFilterComparisonContains = "CONTAINS" + + // StringFilterComparisonNotContains is a StringFilterComparison enum value + StringFilterComparisonNotContains = "NOT_CONTAINS" ) // StringFilterComparison_Values returns all elements of the StringFilterComparison enum @@ -58727,6 +59044,8 @@ func StringFilterComparison_Values() []string { StringFilterComparisonPrefix, StringFilterComparisonNotEquals, StringFilterComparisonPrefixNotEquals, + StringFilterComparisonContains, + StringFilterComparisonNotContains, } } diff --git a/service/sts/api.go b/service/sts/api.go index 7ac6b93f442..11af63b4d8b 100644 --- a/service/sts/api.go +++ b/service/sts/api.go @@ -1460,6 +1460,9 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` + // Reserved for future use. + ProvidedContexts []*ProvidedContext `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1633,6 +1636,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1674,6 +1687,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn return s } +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -2266,7 +2285,8 @@ type AssumeRoleWithWebIdentityInput struct { // The OAuth 2.0 access token or OpenID Connect ID token that is provided by // the identity provider. 
Your application must get this token by authenticating // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. // // WebIdentityToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's @@ -3385,6 +3405,63 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } +// Reserved for future use. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + ContextAssertion *string `min:"4" type:"string"` + + // Reserved for future use. + ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. +func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. +func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags // to control access to resources. For more information, see Tagging Amazon diff --git a/service/transfer/api.go b/service/transfer/api.go index efbcc07691d..c44d7f07f0d 100644 --- a/service/transfer/api.go +++ b/service/transfer/api.go @@ -260,9 +260,12 @@ func (c *Transfer) CreateConnectorRequest(input *CreateConnectorInput) (req *req // CreateConnector API operation for AWS Transfer Family. // // Creates the connector, which captures the parameters for an outbound connection -// for the AS2 protocol. The connector is required for sending files to an externally -// hosted AS2 server. For more details about connectors, see Create AS2 connectors -// (https://docs.aws.amazon.com/transfer/latest/userguide/create-b2b-server.html#configure-as2-connector). +// for the AS2 or SFTP protocol. The connector is required for sending files +// to an externally hosted AS2 or SFTP server. 
For more details about AS2 connectors, +// see Create AS2 connectors (https://docs.aws.amazon.com/transfer/latest/userguide/create-b2b-server.html#configure-as2-connector). +// +// You must specify exactly one configuration object: either for AS2 (As2Config) +// or SFTP (SftpConfig). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1033,7 +1036,7 @@ func (c *Transfer) DeleteConnectorRequest(input *DeleteConnectorInput) (req *req // DeleteConnector API operation for AWS Transfer Family. // -// Deletes the agreement that's specified in the provided ConnectorId. +// Deletes the connector that's specified in the provided ConnectorId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4871,8 +4874,22 @@ func (c *Transfer) StartFileTransferRequest(input *StartFileTransferInput) (req // StartFileTransfer API operation for AWS Transfer Family. // -// Begins an outbound file transfer to a remote AS2 server. You specify the -// ConnectorId and the file paths for where to send the files. +// Begins a file transfer between local Amazon Web Services storage and a remote +// AS2 or SFTP server. +// +// - For an AS2 connector, you specify the ConnectorId and one or more SendFilePaths +// to identify the files you want to transfer. +// +// - For an SFTP connector, the file transfer can be either outbound or inbound. +// In both cases, you specify the ConnectorId. Depending on the direction +// of the transfer, you also specify the following items: If you are transferring +// file from a partner's SFTP server to a Transfer Family server, you specify +// one or more RetreiveFilePaths to identify the files you want to transfer, +// and a LocalDirectoryPath to specify the destination folder. If you are +// transferring file to a partner's SFTP server from Amazon Web Services +// storage, you specify one or more SendFilePaths to identify the files you +// want to transfer, and a RemoteDirectoryPath to specify the destination +// folder. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5228,6 +5245,99 @@ func (c *Transfer) TagResourceWithContext(ctx aws.Context, input *TagResourceInp return out, req.Send() } +const opTestConnection = "TestConnection" + +// TestConnectionRequest generates a "aws/request.Request" representing the +// client's request for the TestConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestConnection for more information on using the TestConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TestConnectionRequest method. 
+// req, resp := client.TestConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TestConnection +func (c *Transfer) TestConnectionRequest(input *TestConnectionInput) (req *request.Request, output *TestConnectionOutput) { + op := &request.Operation{ + Name: opTestConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestConnectionInput{} + } + + output = &TestConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestConnection API operation for AWS Transfer Family. +// +// Tests whether your SFTP connector is set up successfully. We highly recommend +// that you call this operation to test your ability to transfer files between +// a Transfer Family server and a trading partner's SFTP server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Transfer Family's +// API operation TestConnection for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The request has failed because the Amazon Web ServicesTransfer Family service +// is not available. +// +// - InternalServiceError +// This exception is thrown when an error occurs in the Amazon Web ServicesTransfer +// Family service. +// +// - InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// - ResourceNotFoundException +// This exception is thrown when a resource is not found by the Amazon Web ServicesTransfer +// Family service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TestConnection +func (c *Transfer) TestConnection(input *TestConnectionInput) (*TestConnectionOutput, error) { + req, out := c.TestConnectionRequest(input) + return out, req.Send() +} + +// TestConnectionWithContext is the same as TestConnection with the addition of +// the ability to pass a context and additional request options. +// +// See TestConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Transfer) TestConnectionWithContext(ctx aws.Context, input *TestConnectionInput, opts ...request.Option) (*TestConnectionOutput, error) { + req, out := c.TestConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTestIdentityProvider = "TestIdentityProvider" // TestIdentityProviderRequest generates a "aws/request.Request" representing the @@ -6298,9 +6408,9 @@ func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } -// Contains the details for a connector object. The connector object is used -// for AS2 outbound processes, to connect the Transfer Family customer with -// the trading partner. +// Contains the details for an AS2 connector object. The connector object is +// used for AS2 outbound processes, to connect the Transfer Family customer +// with the trading partner. 
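// Usage sketch (illustrative, not generated service code): testing an SFTP connector
// with the new TestConnection operation. Assumes the usual imports ("fmt",
// "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/transfer") and an
// already-constructed *transfer.Transfer client; the connector ID is a placeholder, and
// the output fields referenced here (Status, StatusMessage) are assumed from the
// operation's response shape.
func testSftpConnector(svc *transfer.Transfer, connectorID string) error {
	out, err := svc.TestConnection(&transfer.TestConnectionInput{
		ConnectorId: aws.String(connectorID),
	})
	if err != nil {
		return err
	}
	fmt.Printf("connector %s: %s (%s)\n",
		aws.StringValue(out.ConnectorId),
		aws.StringValue(out.Status),
		aws.StringValue(out.StatusMessage))
	return nil
}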
type As2ConnectorConfig struct { _ struct{} `type:"structure"` @@ -7126,21 +7236,22 @@ type CreateConnectorInput struct { // AccessRole is a required field AccessRole *string `min:"20" type:"string" required:"true"` - // A structure that contains the parameters for a connector object. - // - // As2Config is a required field - As2Config *As2ConnectorConfig `type:"structure" required:"true"` + // A structure that contains the parameters for an AS2 connector object. + As2Config *As2ConnectorConfig `type:"structure"` // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) // role that allows a connector to turn on CloudWatch logging for Amazon S3 // events. When set, you can view connector activity in your CloudWatch logs. LoggingRole *string `min:"20" type:"string"` + // A structure that contains the parameters for an SFTP connector object. + SftpConfig *SftpConnectorConfig `type:"structure"` + // Key-value pairs that can be used to group and search for connectors. Tags // are metadata attached to connectors for any purpose. Tags []*Tag `min:"1" type:"list"` - // The URL of the partner's AS2 endpoint. + // The URL of the partner's AS2 or SFTP endpoint. // // Url is a required field Url *string `type:"string" required:"true"` @@ -7173,9 +7284,6 @@ func (s *CreateConnectorInput) Validate() error { if s.AccessRole != nil && len(*s.AccessRole) < 20 { invalidParams.Add(request.NewErrParamMinLen("AccessRole", 20)) } - if s.As2Config == nil { - invalidParams.Add(request.NewErrParamRequired("As2Config")) - } if s.LoggingRole != nil && len(*s.LoggingRole) < 20 { invalidParams.Add(request.NewErrParamMinLen("LoggingRole", 20)) } @@ -7190,6 +7298,11 @@ func (s *CreateConnectorInput) Validate() error { invalidParams.AddNested("As2Config", err.(request.ErrInvalidParams)) } } + if s.SftpConfig != nil { + if err := s.SftpConfig.Validate(); err != nil { + invalidParams.AddNested("SftpConfig", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -7225,6 +7338,12 @@ func (s *CreateConnectorInput) SetLoggingRole(v string) *CreateConnectorInput { return s } +// SetSftpConfig sets the SftpConfig field's value. +func (s *CreateConnectorInput) SetSftpConfig(v *SftpConnectorConfig) *CreateConnectorInput { + s.SftpConfig = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateConnectorInput) SetTags(v []*Tag) *CreateConnectorInput { s.Tags = v @@ -7869,7 +7988,8 @@ type CreateUserInput struct { // // In most cases, you can use this value instead of the session policy to lock // your user down to the designated home directory ("chroot"). To do this, you - // can set Entry to / and set Target to the HomeDirectory parameter value. + // can set Entry to / and set Target to the value the user should see for their + // home directory when they log in. // // The following is an Entry and Target pair example for chroot. // @@ -10874,7 +10994,7 @@ type DescribedConnector struct { // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // A structure that contains the parameters for a connector object. + // A structure that contains the parameters for an AS2 connector object. As2Config *As2ConnectorConfig `type:"structure"` // The unique identifier for the connector. @@ -10885,10 +11005,13 @@ type DescribedConnector struct { // events. When set, you can view connector activity in your CloudWatch logs. 
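// Usage sketch (illustrative, not generated service code): creating an SFTP connector
// by supplying SftpConfig instead of As2Config, which is now optional. Assumes the
// usual imports and an already-constructed *transfer.Transfer client. The URL, role
// ARN, secret ARN, and host key are placeholders, and the SftpConnectorConfig field
// names used here (UserSecretId, TrustedHostKeys) are assumed from the SFTP connector
// shape introduced in this change.
func createSftpConnector(svc *transfer.Transfer) (string, error) {
	out, err := svc.CreateConnector(&transfer.CreateConnectorInput{
		Url:        aws.String("sftp://partner.example.com"),                     // placeholder endpoint
		AccessRole: aws.String("arn:aws:iam::111122223333:role/transfer-access"), // placeholder role ARN
		SftpConfig: &transfer.SftpConnectorConfig{
			UserSecretId: aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:sftp-user"), // placeholder
			TrustedHostKeys: []*string{
				aws.String("ssh-rsa AAAAB3NzaC1yc2EXAMPLE"), // placeholder public host key from ssh-keyscan
			},
		},
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.ConnectorId), nil
}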
LoggingRole *string `min:"20" type:"string"` + // A structure that contains the parameters for an SFTP connector object. + SftpConfig *SftpConnectorConfig `type:"structure"` + // Key-value pairs that can be used to group and search for connectors. Tags []*Tag `min:"1" type:"list"` - // The URL of the partner's AS2 endpoint. + // The URL of the partner's AS2 or SFTP endpoint. Url *string `type:"string"` } @@ -10940,6 +11063,12 @@ func (s *DescribedConnector) SetLoggingRole(v string) *DescribedConnector { return s } +// SetSftpConfig sets the SftpConfig field's value. +func (s *DescribedConnector) SetSftpConfig(v *SftpConnectorConfig) *DescribedConnector { + s.SftpConfig = v + return s +} + // SetTags sets the Tags field's value. func (s *DescribedConnector) SetTags(v []*Tag) *DescribedConnector { s.Tags = v @@ -14874,7 +15003,7 @@ type ListedConnector struct { // The unique identifier for the connector. ConnectorId *string `min:"19" type:"string"` - // The URL of the partner's AS2 endpoint. + // The URL of the partner's AS2 or SFTP endpoint. Url *string `type:"string"` } @@ -16228,6 +16357,78 @@ func (s *ServiceUnavailableException) RequestID() string { return s.RespMetadata.RequestID } +// Contains the details for an SFTP connector object. The connector object is +// used for transferring files to and from a partner's SFTP server. +type SftpConnectorConfig struct { + _ struct{} `type:"structure"` + + // The public portion of the host key, or keys, that are used to authenticate + // the user to the external server to which you are connecting. You can use + // the ssh-keyscan command against the SFTP server to retrieve the necessary + // key. + // + // The three standard SSH public key format elements are