diff --git a/.changelog/017f7d1342ef433ab59c58213a878d98.json b/.changelog/017f7d1342ef433ab59c58213a878d98.json new file mode 100644 index 00000000000..f99358a262c --- /dev/null +++ b/.changelog/017f7d1342ef433ab59c58213a878d98.json @@ -0,0 +1,8 @@ +{ + "id": "017f7d13-42ef-433a-b59c-58213a878d98", + "type": "documentation", + "description": "Documentation Updates for Amazon Fraud Detector", + "modules": [ + "service/frauddetector" + ] +} \ No newline at end of file diff --git a/.changelog/92b596e6dbe54594a432c831c9bfb8de.json b/.changelog/92b596e6dbe54594a432c831c9bfb8de.json new file mode 100644 index 00000000000..bd00addbd81 --- /dev/null +++ b/.changelog/92b596e6dbe54594a432c831c9bfb8de.json @@ -0,0 +1,8 @@ +{ + "id": "92b596e6-dbe5-4594-a432-c831c9bfb8de", + "type": "feature", + "description": "This change allows customers to enable data capturing while running a batch transform job, and configure monitoring schedule to monitoring the captured data.", + "modules": [ + "service/sagemaker" + ] +} \ No newline at end of file diff --git a/.changelog/9ca01dec42ba46978fa0e76b04f41650.json b/.changelog/9ca01dec42ba46978fa0e76b04f41650.json new file mode 100644 index 00000000000..cbfbf710085 --- /dev/null +++ b/.changelog/9ca01dec42ba46978fa0e76b04f41650.json @@ -0,0 +1,8 @@ +{ + "id": "9ca01dec-42ba-4697-8fa0-e76b04f41650", + "type": "feature", + "description": "This release allows subscribers to enable Dedicated IPs (managed) to send email via a fully managed dedicated IP experience. It also adds identities' VerificationStatus in the response of GetEmailIdentity and ListEmailIdentities APIs, and ImportJobs counts in the response of ListImportJobs API.", + "modules": [ + "service/sesv2" + ] +} \ No newline at end of file diff --git a/.changelog/d9cf345364b94abe8bb65316925fd5a4.json b/.changelog/d9cf345364b94abe8bb65316925fd5a4.json new file mode 100644 index 00000000000..428557d77c4 --- /dev/null +++ b/.changelog/d9cf345364b94abe8bb65316925fd5a4.json @@ -0,0 +1,8 @@ +{ + "id": "d9cf3453-64b9-4abe-8bb6-5316925fd5a4", + "type": "feature", + "description": "Updated the ListNamespaces API to support the NAME and HTTP_NAME filters, and the BEGINS_WITH filter condition.", + "modules": [ + "service/servicediscovery" + ] +} \ No newline at end of file diff --git a/service/frauddetector/api_op_CreateBatchImportJob.go b/service/frauddetector/api_op_CreateBatchImportJob.go index c33c334e96a..2f865499351 100644 --- a/service/frauddetector/api_op_CreateBatchImportJob.go +++ b/service/frauddetector/api_op_CreateBatchImportJob.go @@ -35,8 +35,11 @@ type CreateBatchImportJobInput struct { EventTypeName *string // The ARN of the IAM role created for Amazon S3 bucket that holds your data file. - // The IAM role must have read and write permissions to both input and output S3 - // buckets. + // The IAM role must have read permissions to your input S3 bucket and write + // permissions to your output S3 bucket. For more information about bucket + // permissions, see User policy examples + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-policies-s3.html) + // in the Amazon S3 User Guide. // // This member is required. 
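// A minimal usage sketch of supplying the IAM role described above to a batch
// import job. Only the members shown in this diff are populated; the remaining
// required members of CreateBatchImportJobInput (the S3 input and output
// locations and a job ID) are elided here and must be filled in before the
// request will succeed.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/frauddetector"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := frauddetector.NewFromConfig(cfg)

	_, err = client.CreateBatchImportJob(context.TODO(), &frauddetector.CreateBatchImportJobInput{
		EventTypeName: aws.String("sample_registration"),
		// Role with read access to the input bucket and write access to the
		// output bucket, as described above.
		IamRoleArn: aws.String("arn:aws:iam::111122223333:role/frauddetector-batch-import"),
		// ... S3 input/output locations and job ID elided ...
	})
	if err != nil {
		log.Fatal(err)
	}
}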
IamRoleArn *string diff --git a/service/frauddetector/api_op_CreateBatchPredictionJob.go b/service/frauddetector/api_op_CreateBatchPredictionJob.go index 6963568a0d9..17a9184fb9b 100644 --- a/service/frauddetector/api_op_CreateBatchPredictionJob.go +++ b/service/frauddetector/api_op_CreateBatchPredictionJob.go @@ -39,7 +39,11 @@ type CreateBatchPredictionJobInput struct { // This member is required. EventTypeName *string - // The ARN of the IAM role to use for this job request. + // The ARN of the IAM role to use for this job request. The IAM Role must have read + // permissions to your input S3 bucket and write permissions to your output S3 + // bucket. For more information about bucket permissions, see User policy examples + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-policies-s3.html) + // in the Amazon S3 User Guide. // // This member is required. IamRoleArn *string diff --git a/service/frauddetector/api_op_PutKMSEncryptionKey.go b/service/frauddetector/api_op_PutKMSEncryptionKey.go index c7eac81b815..3bdc986f3dc 100644 --- a/service/frauddetector/api_op_PutKMSEncryptionKey.go +++ b/service/frauddetector/api_op_PutKMSEncryptionKey.go @@ -28,7 +28,8 @@ func (c *Client) PutKMSEncryptionKey(ctx context.Context, params *PutKMSEncrypti type PutKMSEncryptionKeyInput struct { - // The KMS encryption key ARN. + // The KMS encryption key ARN. The KMS key must be single-Region key. Amazon Fraud + // Detector does not support multi-Region KMS key. // // This member is required. KmsEncryptionKeyArn *string diff --git a/service/frauddetector/api_op_UpdateDetectorVersionStatus.go b/service/frauddetector/api_op_UpdateDetectorVersionStatus.go index f42d7cbbe67..5ff5cd44b3c 100644 --- a/service/frauddetector/api_op_UpdateDetectorVersionStatus.go +++ b/service/frauddetector/api_op_UpdateDetectorVersionStatus.go @@ -41,7 +41,7 @@ type UpdateDetectorVersionStatusInput struct { // This member is required. DetectorVersionId *string - // The new status. + // The new status. The only supported values are ACTIVE and INACTIVE // // This member is required. Status types.DetectorVersionStatus diff --git a/service/networkfirewall/internal/endpoints/endpoints.go b/service/networkfirewall/internal/endpoints/endpoints.go index 0fb82210931..a69e901afe6 100644 --- a/service/networkfirewall/internal/endpoints/endpoints.go +++ b/service/networkfirewall/internal/endpoints/endpoints.go @@ -159,6 +159,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, diff --git a/service/sagemaker/api_op_CreateTransformJob.go b/service/sagemaker/api_op_CreateTransformJob.go index fd925c6bed5..693019dc466 100644 --- a/service/sagemaker/api_op_CreateTransformJob.go +++ b/service/sagemaker/api_op_CreateTransformJob.go @@ -97,6 +97,9 @@ type CreateTransformJobInput struct { // MultiRecord and SplitType to Line. BatchStrategy types.BatchStrategy + // Configuration to control how SageMaker captures inference data. + DataCaptureConfig *types.BatchDataCaptureConfig + // The data structure used to specify the data to be used for inference in a batch // transform job and to associate the data that is relevant to the prediction // results in the output. 
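// A minimal sketch of enabling data capture on a batch transform job through
// the new DataCaptureConfig member. Only fields introduced in this change are
// populated; the other required members of CreateTransformJobInput (job name,
// model name, transform input/output, resources, and so on) are elided and
// would need to be supplied. Bucket names and key alias are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sagemaker.NewFromConfig(cfg)

	input := &sagemaker.CreateTransformJobInput{
		// ... other required transform-job members elided ...

		// Capture inference data produced while the batch transform job runs.
		DataCaptureConfig: &types.BatchDataCaptureConfig{
			DestinationS3Uri:    aws.String("s3://my-bucket/transform-capture/"), // required
			GenerateInferenceId: true,                                            // append an inference ID to the output
			KmsKeyId:            aws.String("alias/ExampleAlias"),                // optional
		},
	}

	if _, err := client.CreateTransformJob(context.TODO(), input); err != nil {
		log.Fatal(err)
	}
}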
The input filter provided allows you to exclude input diff --git a/service/sagemaker/api_op_DescribeTransformJob.go b/service/sagemaker/api_op_DescribeTransformJob.go index 6564d02f5e5..1b55a112580 100644 --- a/service/sagemaker/api_op_DescribeTransformJob.go +++ b/service/sagemaker/api_op_DescribeTransformJob.go @@ -93,6 +93,9 @@ type DescribeTransformJobOutput struct { // strategy, you must set SplitType to Line, RecordIO, or TFRecord. BatchStrategy types.BatchStrategy + // Configuration to control how SageMaker captures inference data. + DataCaptureConfig *types.BatchDataCaptureConfig + // The data structure used to specify the data to be used for inference in a batch // transform job and to associate the data that is relevant to the prediction // results in the output. The input filter provided allows you to exclude input diff --git a/service/sagemaker/deserializers.go b/service/sagemaker/deserializers.go index 44ca034d471..69987c6c540 100644 --- a/service/sagemaker/deserializers.go +++ b/service/sagemaker/deserializers.go @@ -31420,6 +31420,64 @@ func awsAwsjson11_deserializeDocumentAutoRollbackConfig(v **types.AutoRollbackCo return nil } +func awsAwsjson11_deserializeDocumentBatchDataCaptureConfig(v **types.BatchDataCaptureConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BatchDataCaptureConfig + if *v == nil { + sv = &types.BatchDataCaptureConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DestinationS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.DestinationS3Uri = ptr.String(jtv) + } + + case "GenerateInferenceId": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.GenerateInferenceId = jtv + } + + case "KmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KmsKeyId to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentBatchDescribeModelPackageError(v **types.BatchDescribeModelPackageError, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -31614,6 +31672,157 @@ func awsAwsjson11_deserializeDocumentBatchDescribeModelPackageSummary(v **types. 
return nil } +func awsAwsjson11_deserializeDocumentBatchTransformInput(v **types.BatchTransformInput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BatchTransformInput + if *v == nil { + sv = &types.BatchTransformInput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DataCapturedDestinationS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationS3Uri to be of type string, got %T instead", value) + } + sv.DataCapturedDestinationS3Uri = ptr.String(jtv) + } + + case "DatasetFormat": + if err := awsAwsjson11_deserializeDocumentMonitoringDatasetFormat(&sv.DatasetFormat, value); err != nil { + return err + } + + case "EndTimeOffset": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MonitoringTimeOffsetString to be of type string, got %T instead", value) + } + sv.EndTimeOffset = ptr.String(jtv) + } + + case "FeaturesAttribute": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.FeaturesAttribute = ptr.String(jtv) + } + + case "InferenceAttribute": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.InferenceAttribute = ptr.String(jtv) + } + + case "LocalPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProcessingLocalPath to be of type string, got %T instead", value) + } + sv.LocalPath = ptr.String(jtv) + } + + case "ProbabilityAttribute": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ProbabilityAttribute = ptr.String(jtv) + } + + case "ProbabilityThresholdAttribute": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ProbabilityThresholdAttribute = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.ProbabilityThresholdAttribute = ptr.Float64(f64) + + default: + return fmt.Errorf("expected ProbabilityThresholdAttribute to be a JSON Number, got %T instead", value) + + } + } + + case "S3DataDistributionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProcessingS3DataDistributionType to be of type string, got %T instead", value) + } + sv.S3DataDistributionType = types.ProcessingS3DataDistributionType(jtv) + } + + case "S3InputMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProcessingS3InputMode to be of type string, got %T instead", value) + } + sv.S3InputMode = types.ProcessingS3InputMode(jtv) + } + + case "StartTimeOffset": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MonitoringTimeOffsetString to be of type string, got %T instead", value) + } + sv.StartTimeOffset = ptr.String(jtv) + } + + default: + _, _ = 
key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentBias(v **types.Bias, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -35085,6 +35294,11 @@ func awsAwsjson11_deserializeDocumentDataQualityJobInput(v **types.DataQualityJo for key, value := range shape { switch key { + case "BatchTransformInput": + if err := awsAwsjson11_deserializeDocumentBatchTransformInput(&sv.BatchTransformInput, value); err != nil { + return err + } + case "EndpointInput": if err := awsAwsjson11_deserializeDocumentEndpointInput(&sv.EndpointInput, value); err != nil { return err @@ -45660,6 +45874,11 @@ func awsAwsjson11_deserializeDocumentModelBiasJobInput(v **types.ModelBiasJobInp for key, value := range shape { switch key { + case "BatchTransformInput": + if err := awsAwsjson11_deserializeDocumentBatchTransformInput(&sv.BatchTransformInput, value); err != nil { + return err + } + case "EndpointInput": if err := awsAwsjson11_deserializeDocumentEndpointInput(&sv.EndpointInput, value); err != nil { return err @@ -46072,6 +46291,11 @@ func awsAwsjson11_deserializeDocumentModelExplainabilityJobInput(v **types.Model for key, value := range shape { switch key { + case "BatchTransformInput": + if err := awsAwsjson11_deserializeDocumentBatchTransformInput(&sv.BatchTransformInput, value); err != nil { + return err + } + case "EndpointInput": if err := awsAwsjson11_deserializeDocumentEndpointInput(&sv.EndpointInput, value); err != nil { return err @@ -47596,6 +47820,11 @@ func awsAwsjson11_deserializeDocumentModelQualityJobInput(v **types.ModelQuality for key, value := range shape { switch key { + case "BatchTransformInput": + if err := awsAwsjson11_deserializeDocumentBatchTransformInput(&sv.BatchTransformInput, value); err != nil { + return err + } + case "EndpointInput": if err := awsAwsjson11_deserializeDocumentEndpointInput(&sv.EndpointInput, value); err != nil { return err @@ -48023,6 +48252,92 @@ func awsAwsjson11_deserializeDocumentMonitoringContainerArguments(v *[]string, v return nil } +func awsAwsjson11_deserializeDocumentMonitoringCsvDatasetFormat(v **types.MonitoringCsvDatasetFormat, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MonitoringCsvDatasetFormat + if *v == nil { + sv = &types.MonitoringCsvDatasetFormat{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Header": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Header = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMonitoringDatasetFormat(v **types.MonitoringDatasetFormat, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MonitoringDatasetFormat + if *v == nil { + sv = &types.MonitoringDatasetFormat{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Csv": + if err := awsAwsjson11_deserializeDocumentMonitoringCsvDatasetFormat(&sv.Csv, value); err != nil { + return err + } + + case "Json": + if err := 
awsAwsjson11_deserializeDocumentMonitoringJsonDatasetFormat(&sv.Json, value); err != nil { + return err + } + + case "Parquet": + if err := awsAwsjson11_deserializeDocumentMonitoringParquetDatasetFormat(&sv.Parquet, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentMonitoringEnvironmentMap(v *map[string]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -48297,6 +48612,11 @@ func awsAwsjson11_deserializeDocumentMonitoringInput(v **types.MonitoringInput, for key, value := range shape { switch key { + case "BatchTransformInput": + if err := awsAwsjson11_deserializeDocumentBatchTransformInput(&sv.BatchTransformInput, value); err != nil { + return err + } + case "EndpointInput": if err := awsAwsjson11_deserializeDocumentEndpointInput(&sv.EndpointInput, value); err != nil { return err @@ -48533,6 +48853,46 @@ func awsAwsjson11_deserializeDocumentMonitoringJobDefinitionSummaryList(v *[]typ return nil } +func awsAwsjson11_deserializeDocumentMonitoringJsonDatasetFormat(v **types.MonitoringJsonDatasetFormat, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MonitoringJsonDatasetFormat + if *v == nil { + sv = &types.MonitoringJsonDatasetFormat{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Line": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Line = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentMonitoringNetworkConfig(v **types.MonitoringNetworkConfig, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -48702,6 +49062,37 @@ func awsAwsjson11_deserializeDocumentMonitoringOutputs(v *[]types.MonitoringOutp return nil } +func awsAwsjson11_deserializeDocumentMonitoringParquetDatasetFormat(v **types.MonitoringParquetDatasetFormat, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MonitoringParquetDatasetFormat + if *v == nil { + sv = &types.MonitoringParquetDatasetFormat{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentMonitoringResources(v **types.MonitoringResources, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -70841,6 +71232,11 @@ func awsAwsjson11_deserializeOpDocumentDescribeTransformJobOutput(v **DescribeTr } } + case "DataCaptureConfig": + if err := awsAwsjson11_deserializeDocumentBatchDataCaptureConfig(&sv.DataCaptureConfig, value); err != nil { + return err + } + case "DataProcessing": if err := awsAwsjson11_deserializeDocumentDataProcessing(&sv.DataProcessing, value); err != nil { return err diff --git a/service/sagemaker/serializers.go b/service/sagemaker/serializers.go index 8e537755b13..e9f8c3ff751 100644 --- a/service/sagemaker/serializers.go +++ b/service/sagemaker/serializers.go @@ 
-15159,6 +15159,105 @@ func awsAwsjson11_serializeDocumentAutoRollbackConfig(v *types.AutoRollbackConfi return nil } +func awsAwsjson11_serializeDocumentBatchDataCaptureConfig(v *types.BatchDataCaptureConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DestinationS3Uri != nil { + ok := object.Key("DestinationS3Uri") + ok.String(*v.DestinationS3Uri) + } + + if v.GenerateInferenceId { + ok := object.Key("GenerateInferenceId") + ok.Boolean(v.GenerateInferenceId) + } + + if v.KmsKeyId != nil { + ok := object.Key("KmsKeyId") + ok.String(*v.KmsKeyId) + } + + return nil +} + +func awsAwsjson11_serializeDocumentBatchTransformInput(v *types.BatchTransformInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DataCapturedDestinationS3Uri != nil { + ok := object.Key("DataCapturedDestinationS3Uri") + ok.String(*v.DataCapturedDestinationS3Uri) + } + + if v.DatasetFormat != nil { + ok := object.Key("DatasetFormat") + if err := awsAwsjson11_serializeDocumentMonitoringDatasetFormat(v.DatasetFormat, ok); err != nil { + return err + } + } + + if v.EndTimeOffset != nil { + ok := object.Key("EndTimeOffset") + ok.String(*v.EndTimeOffset) + } + + if v.FeaturesAttribute != nil { + ok := object.Key("FeaturesAttribute") + ok.String(*v.FeaturesAttribute) + } + + if v.InferenceAttribute != nil { + ok := object.Key("InferenceAttribute") + ok.String(*v.InferenceAttribute) + } + + if v.LocalPath != nil { + ok := object.Key("LocalPath") + ok.String(*v.LocalPath) + } + + if v.ProbabilityAttribute != nil { + ok := object.Key("ProbabilityAttribute") + ok.String(*v.ProbabilityAttribute) + } + + if v.ProbabilityThresholdAttribute != nil { + ok := object.Key("ProbabilityThresholdAttribute") + switch { + case math.IsNaN(*v.ProbabilityThresholdAttribute): + ok.String("NaN") + + case math.IsInf(*v.ProbabilityThresholdAttribute, 1): + ok.String("Infinity") + + case math.IsInf(*v.ProbabilityThresholdAttribute, -1): + ok.String("-Infinity") + + default: + ok.Double(*v.ProbabilityThresholdAttribute) + + } + } + + if len(v.S3DataDistributionType) > 0 { + ok := object.Key("S3DataDistributionType") + ok.String(string(v.S3DataDistributionType)) + } + + if len(v.S3InputMode) > 0 { + ok := object.Key("S3InputMode") + ok.String(string(v.S3InputMode)) + } + + if v.StartTimeOffset != nil { + ok := object.Key("StartTimeOffset") + ok.String(*v.StartTimeOffset) + } + + return nil +} + func awsAwsjson11_serializeDocumentBias(v *types.Bias, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -16222,6 +16321,13 @@ func awsAwsjson11_serializeDocumentDataQualityJobInput(v *types.DataQualityJobIn object := value.Object() defer object.Close() + if v.BatchTransformInput != nil { + ok := object.Key("BatchTransformInput") + if err := awsAwsjson11_serializeDocumentBatchTransformInput(v.BatchTransformInput, ok); err != nil { + return err + } + } + if v.EndpointInput != nil { ok := object.Key("EndpointInput") if err := awsAwsjson11_serializeDocumentEndpointInput(v.EndpointInput, ok); err != nil { @@ -18595,6 +18701,13 @@ func awsAwsjson11_serializeDocumentModelBiasJobInput(v *types.ModelBiasJobInput, object := value.Object() defer object.Close() + if v.BatchTransformInput != nil { + ok := object.Key("BatchTransformInput") + if err := awsAwsjson11_serializeDocumentBatchTransformInput(v.BatchTransformInput, ok); err != nil { + return err + } + } + if v.EndpointInput != nil { ok := object.Key("EndpointInput") if err := 
awsAwsjson11_serializeDocumentEndpointInput(v.EndpointInput, ok); err != nil { @@ -18714,6 +18827,13 @@ func awsAwsjson11_serializeDocumentModelExplainabilityJobInput(v *types.ModelExp object := value.Object() defer object.Close() + if v.BatchTransformInput != nil { + ok := object.Key("BatchTransformInput") + if err := awsAwsjson11_serializeDocumentBatchTransformInput(v.BatchTransformInput, ok); err != nil { + return err + } + } + if v.EndpointInput != nil { ok := object.Key("EndpointInput") if err := awsAwsjson11_serializeDocumentEndpointInput(v.EndpointInput, ok); err != nil { @@ -19073,6 +19193,13 @@ func awsAwsjson11_serializeDocumentModelQualityJobInput(v *types.ModelQualityJob object := value.Object() defer object.Close() + if v.BatchTransformInput != nil { + ok := object.Key("BatchTransformInput") + if err := awsAwsjson11_serializeDocumentBatchTransformInput(v.BatchTransformInput, ok); err != nil { + return err + } + } + if v.EndpointInput != nil { ok := object.Key("EndpointInput") if err := awsAwsjson11_serializeDocumentEndpointInput(v.EndpointInput, ok); err != nil { @@ -19202,6 +19329,46 @@ func awsAwsjson11_serializeDocumentMonitoringContainerArguments(v []string, valu return nil } +func awsAwsjson11_serializeDocumentMonitoringCsvDatasetFormat(v *types.MonitoringCsvDatasetFormat, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Header { + ok := object.Key("Header") + ok.Boolean(v.Header) + } + + return nil +} + +func awsAwsjson11_serializeDocumentMonitoringDatasetFormat(v *types.MonitoringDatasetFormat, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Csv != nil { + ok := object.Key("Csv") + if err := awsAwsjson11_serializeDocumentMonitoringCsvDatasetFormat(v.Csv, ok); err != nil { + return err + } + } + + if v.Json != nil { + ok := object.Key("Json") + if err := awsAwsjson11_serializeDocumentMonitoringJsonDatasetFormat(v.Json, ok); err != nil { + return err + } + } + + if v.Parquet != nil { + ok := object.Key("Parquet") + if err := awsAwsjson11_serializeDocumentMonitoringParquetDatasetFormat(v.Parquet, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeDocumentMonitoringEnvironmentMap(v map[string]string, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -19229,6 +19396,13 @@ func awsAwsjson11_serializeDocumentMonitoringInput(v *types.MonitoringInput, val object := value.Object() defer object.Close() + if v.BatchTransformInput != nil { + ok := object.Key("BatchTransformInput") + if err := awsAwsjson11_serializeDocumentBatchTransformInput(v.BatchTransformInput, ok); err != nil { + return err + } + } + if v.EndpointInput != nil { ok := object.Key("EndpointInput") if err := awsAwsjson11_serializeDocumentEndpointInput(v.EndpointInput, ok); err != nil { @@ -19320,6 +19494,18 @@ func awsAwsjson11_serializeDocumentMonitoringJobDefinition(v *types.MonitoringJo return nil } +func awsAwsjson11_serializeDocumentMonitoringJsonDatasetFormat(v *types.MonitoringJsonDatasetFormat, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Line { + ok := object.Key("Line") + ok.Boolean(v.Line) + } + + return nil +} + func awsAwsjson11_serializeDocumentMonitoringNetworkConfig(v *types.MonitoringNetworkConfig, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -19390,6 +19576,13 @@ func awsAwsjson11_serializeDocumentMonitoringOutputs(v []types.MonitoringOutput, return nil } +func 
awsAwsjson11_serializeDocumentMonitoringParquetDatasetFormat(v *types.MonitoringParquetDatasetFormat, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + func awsAwsjson11_serializeDocumentMonitoringResources(v *types.MonitoringResources, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -24538,6 +24731,13 @@ func awsAwsjson11_serializeOpDocumentCreateTransformJobInput(v *CreateTransformJ ok.String(string(v.BatchStrategy)) } + if v.DataCaptureConfig != nil { + ok := object.Key("DataCaptureConfig") + if err := awsAwsjson11_serializeDocumentBatchDataCaptureConfig(v.DataCaptureConfig, ok); err != nil { + return err + } + } + if v.DataProcessing != nil { ok := object.Key("DataProcessing") if err := awsAwsjson11_serializeDocumentDataProcessing(v.DataProcessing, ok); err != nil { diff --git a/service/sagemaker/types/types.go b/service/sagemaker/types/types.go index 70ac1ca1aea..3d1e8761511 100644 --- a/service/sagemaker/types/types.go +++ b/service/sagemaker/types/types.go @@ -1942,6 +1942,39 @@ type AutoRollbackConfig struct { noSmithyDocumentSerde } +// Configuration to control how SageMaker captures inference data for batch +// transform jobs. +type BatchDataCaptureConfig struct { + + // The Amazon S3 location being used to capture the data. + // + // This member is required. + DestinationS3Uri *string + + // Flag that indicates whether to append inference id to the output. + GenerateInferenceId bool + + // The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service + // key that SageMaker uses to encrypt data on the storage volume attached to the ML + // compute instance that hosts the batch transform job. The KmsKeyId can be any of + // the following formats: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias name ARN: + // arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + KmsKeyId *string + + noSmithyDocumentSerde +} + // The error code and error description associated with the resource. type BatchDescribeModelPackageError struct { @@ -1998,6 +2031,62 @@ type BatchDescribeModelPackageSummary struct { noSmithyDocumentSerde } +// Input object for the batch transform job. +type BatchTransformInput struct { + + // The Amazon S3 location being used to capture the data. + // + // This member is required. + DataCapturedDestinationS3Uri *string + + // The dataset format for your batch transform job. + // + // This member is required. + DatasetFormat *MonitoringDatasetFormat + + // Path to the filesystem where the batch transform data is available to the + // container. + // + // This member is required. + LocalPath *string + + // If specified, monitoring jobs substract this time from the end time. For + // information about using offsets for scheduling monitoring jobs, see Schedule + // Model Quality Monitoring Jobs + // (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-schedule.html). + EndTimeOffset *string + + // The attributes of the input data that are the input features. + FeaturesAttribute *string + + // The attribute of the input data that represents the ground truth label. + InferenceAttribute *string + + // In a classification problem, the attribute that represents the class + // probability. 
+ ProbabilityAttribute *string + + // The threshold for the class probability to be evaluated as a positive result. + ProbabilityThresholdAttribute *float64 + + // Whether input data distributed in Amazon S3 is fully replicated or sharded by an + // S3 key. Defaults to FullyReplicated + S3DataDistributionType ProcessingS3DataDistributionType + + // Whether the Pipe or File is used as the input mode for transferring data for the + // monitoring job. Pipe mode is recommended for large datasets. File mode is useful + // for small files that fit in memory. Defaults to File. + S3InputMode ProcessingS3InputMode + + // If specified, monitoring jobs substract this time from the start time. For + // information about using offsets for scheduling monitoring jobs, see Schedule + // Model Quality Monitoring Jobs + // (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-schedule.html). + StartTimeOffset *string + + noSmithyDocumentSerde +} + // Contains bias metrics for a model. type Bias struct { @@ -3102,9 +3191,10 @@ type DataQualityBaselineConfig struct { // for input. type DataQualityJobInput struct { + // Input object for the batch transform job. + BatchTransformInput *BatchTransformInput + // Input object for the endpoint - // - // This member is required. EndpointInput *EndpointInput noSmithyDocumentSerde @@ -8014,16 +8104,17 @@ type ModelBiasBaselineConfig struct { // Inputs for the model bias job. type ModelBiasJobInput struct { - // Input object for the endpoint - // - // This member is required. - EndpointInput *EndpointInput - // Location of ground truth labels to use in model bias job. // // This member is required. GroundTruthS3Input *MonitoringGroundTruthS3Input + // Input object for the batch transform job. + BatchTransformInput *BatchTransformInput + + // Input object for the endpoint + EndpointInput *EndpointInput + noSmithyDocumentSerde } @@ -8142,9 +8233,10 @@ type ModelExplainabilityBaselineConfig struct { // Inputs for the model explainability job. type ModelExplainabilityJobInput struct { + // Input object for the batch transform job. + BatchTransformInput *BatchTransformInput + // Input object for the endpoint - // - // This member is required. EndpointInput *EndpointInput noSmithyDocumentSerde @@ -8680,16 +8772,17 @@ type ModelQualityBaselineConfig struct { // for input for model quality monitoring jobs. type ModelQualityJobInput struct { - // Input object for the endpoint - // - // This member is required. - EndpointInput *EndpointInput - // The ground truth label provided for the model. // // This member is required. GroundTruthS3Input *MonitoringGroundTruthS3Input + // Input object for the batch transform job. + BatchTransformInput *BatchTransformInput + + // Input object for the endpoint + EndpointInput *EndpointInput + noSmithyDocumentSerde } @@ -8807,6 +8900,30 @@ type MonitoringConstraintsResource struct { noSmithyDocumentSerde } +// Represents the CSV dataset format used when running a monitoring job. +type MonitoringCsvDatasetFormat struct { + + // Indicates if the CSV data has a header. + Header bool + + noSmithyDocumentSerde +} + +// Represents the dataset format used when running a monitoring job. +type MonitoringDatasetFormat struct { + + // The CSV dataset used in the monitoring job. 
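// A minimal sketch of pointing a data quality monitoring job at data captured
// from a batch transform job, using the new BatchTransformInput member in
// place of EndpointInput. All values are placeholders, and the enclosing
// monitoring job definition request (job name, resources, app specification,
// and so on) is assumed and not shown here.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

func main() {
	jobInput := &types.DataQualityJobInput{
		BatchTransformInput: &types.BatchTransformInput{
			// Where the transform job wrote its captured data (required).
			DataCapturedDestinationS3Uri: aws.String("s3://my-bucket/transform-capture/"),
			// Path where the data is made available inside the monitoring container (required).
			LocalPath: aws.String("/opt/ml/processing/input"),
			// Format of the captured dataset (required); CSV with a header row here.
			DatasetFormat: &types.MonitoringDatasetFormat{
				Csv: &types.MonitoringCsvDatasetFormat{Header: true},
			},
			// Optional: only consider data captured in the last hour.
			StartTimeOffset: aws.String("-PT1H"),
		},
	}
	fmt.Printf("%+v\n", jobInput)
}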
+ Csv *MonitoringCsvDatasetFormat + + // The JSON dataset used in the monitoring job + Json *MonitoringJsonDatasetFormat + + // The Parquet dataset used in the monitoring job + Parquet *MonitoringParquetDatasetFormat + + noSmithyDocumentSerde +} + // Summary of information about the last monitoring job to run. type MonitoringExecutionSummary struct { @@ -8865,9 +8982,10 @@ type MonitoringGroundTruthS3Input struct { // The inputs for a monitoring job. type MonitoringInput struct { + // Input object for the batch transform job. + BatchTransformInput *BatchTransformInput + // The endpoint for a monitoring job. - // - // This member is required. EndpointInput *EndpointInput noSmithyDocumentSerde @@ -8948,6 +9066,15 @@ type MonitoringJobDefinitionSummary struct { noSmithyDocumentSerde } +// Represents the JSON dataset format used when running a monitoring job. +type MonitoringJsonDatasetFormat struct { + + // Indicates if the file should be read as a json object per line. + Line bool + + noSmithyDocumentSerde +} + // The networking configuration for the monitoring job. type MonitoringNetworkConfig struct { @@ -8999,6 +9126,11 @@ type MonitoringOutputConfig struct { noSmithyDocumentSerde } +// Represents the Parquet dataset format used when running a monitoring job. +type MonitoringParquetDatasetFormat struct { + noSmithyDocumentSerde +} + // Identifies the resources to deploy for a monitoring job. type MonitoringResources struct { diff --git a/service/sagemaker/validators.go b/service/sagemaker/validators.go index 86b3fa13bb2..29b66dcc1e3 100644 --- a/service/sagemaker/validators.go +++ b/service/sagemaker/validators.go @@ -5452,6 +5452,42 @@ func validateAutoMLSecurityConfig(v *types.AutoMLSecurityConfig) error { } } +func validateBatchDataCaptureConfig(v *types.BatchDataCaptureConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchDataCaptureConfig"} + if v.DestinationS3Uri == nil { + invalidParams.Add(smithy.NewErrParamRequired("DestinationS3Uri")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBatchTransformInput(v *types.BatchTransformInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchTransformInput"} + if v.DataCapturedDestinationS3Uri == nil { + invalidParams.Add(smithy.NewErrParamRequired("DataCapturedDestinationS3Uri")) + } + if v.DatasetFormat == nil { + invalidParams.Add(smithy.NewErrParamRequired("DatasetFormat")) + } + if v.LocalPath == nil { + invalidParams.Add(smithy.NewErrParamRequired("LocalPath")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateBias(v *types.Bias) error { if v == nil { return nil @@ -6015,13 +6051,16 @@ func validateDataQualityJobInput(v *types.DataQualityJobInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "DataQualityJobInput"} - if v.EndpointInput == nil { - invalidParams.Add(smithy.NewErrParamRequired("EndpointInput")) - } else if v.EndpointInput != nil { + if v.EndpointInput != nil { if err := validateEndpointInput(v.EndpointInput); err != nil { invalidParams.AddNested("EndpointInput", err.(smithy.InvalidParamsError)) } } + if v.BatchTransformInput != nil { + if err := validateBatchTransformInput(v.BatchTransformInput); err != nil { + invalidParams.AddNested("BatchTransformInput", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -7610,13 +7649,16 @@ 
func validateModelBiasJobInput(v *types.ModelBiasJobInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "ModelBiasJobInput"} - if v.EndpointInput == nil { - invalidParams.Add(smithy.NewErrParamRequired("EndpointInput")) - } else if v.EndpointInput != nil { + if v.EndpointInput != nil { if err := validateEndpointInput(v.EndpointInput); err != nil { invalidParams.AddNested("EndpointInput", err.(smithy.InvalidParamsError)) } } + if v.BatchTransformInput != nil { + if err := validateBatchTransformInput(v.BatchTransformInput); err != nil { + invalidParams.AddNested("BatchTransformInput", err.(smithy.InvalidParamsError)) + } + } if v.GroundTruthS3Input == nil { invalidParams.Add(smithy.NewErrParamRequired("GroundTruthS3Input")) } @@ -7672,13 +7714,16 @@ func validateModelExplainabilityJobInput(v *types.ModelExplainabilityJobInput) e return nil } invalidParams := smithy.InvalidParamsError{Context: "ModelExplainabilityJobInput"} - if v.EndpointInput == nil { - invalidParams.Add(smithy.NewErrParamRequired("EndpointInput")) - } else if v.EndpointInput != nil { + if v.EndpointInput != nil { if err := validateEndpointInput(v.EndpointInput); err != nil { invalidParams.AddNested("EndpointInput", err.(smithy.InvalidParamsError)) } } + if v.BatchTransformInput != nil { + if err := validateBatchTransformInput(v.BatchTransformInput); err != nil { + invalidParams.AddNested("BatchTransformInput", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -7925,13 +7970,16 @@ func validateModelQualityJobInput(v *types.ModelQualityJobInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "ModelQualityJobInput"} - if v.EndpointInput == nil { - invalidParams.Add(smithy.NewErrParamRequired("EndpointInput")) - } else if v.EndpointInput != nil { + if v.EndpointInput != nil { if err := validateEndpointInput(v.EndpointInput); err != nil { invalidParams.AddNested("EndpointInput", err.(smithy.InvalidParamsError)) } } + if v.BatchTransformInput != nil { + if err := validateBatchTransformInput(v.BatchTransformInput); err != nil { + invalidParams.AddNested("BatchTransformInput", err.(smithy.InvalidParamsError)) + } + } if v.GroundTruthS3Input == nil { invalidParams.Add(smithy.NewErrParamRequired("GroundTruthS3Input")) } @@ -7983,13 +8031,16 @@ func validateMonitoringInput(v *types.MonitoringInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "MonitoringInput"} - if v.EndpointInput == nil { - invalidParams.Add(smithy.NewErrParamRequired("EndpointInput")) - } else if v.EndpointInput != nil { + if v.EndpointInput != nil { if err := validateEndpointInput(v.EndpointInput); err != nil { invalidParams.AddNested("EndpointInput", err.(smithy.InvalidParamsError)) } } + if v.BatchTransformInput != nil { + if err := validateBatchTransformInput(v.BatchTransformInput); err != nil { + invalidParams.AddNested("BatchTransformInput", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -11344,6 +11395,11 @@ func validateOpCreateTransformJobInput(v *CreateTransformJobInput) error { invalidParams.AddNested("TransformOutput", err.(smithy.InvalidParamsError)) } } + if v.DataCaptureConfig != nil { + if err := validateBatchDataCaptureConfig(v.DataCaptureConfig); err != nil { + invalidParams.AddNested("DataCaptureConfig", err.(smithy.InvalidParamsError)) + } + } if v.TransformResources == nil { invalidParams.Add(smithy.NewErrParamRequired("TransformResources")) } else if 
v.TransformResources != nil { diff --git a/service/servicediscovery/api_op_CreateHttpNamespace.go b/service/servicediscovery/api_op_CreateHttpNamespace.go index 6077ab8f6be..88b0537f28b 100644 --- a/service/servicediscovery/api_op_CreateHttpNamespace.go +++ b/service/servicediscovery/api_op_CreateHttpNamespace.go @@ -15,7 +15,7 @@ import ( // Creates an HTTP namespace. Service instances registered using an HTTP namespace // can be discovered using a DiscoverInstances request but can't be discovered // using DNS. For the current quota on the number of namespaces that you can create -// using the same account, see Cloud Map quotas +// using the same Amazon Web Services account, see Cloud Map quotas // (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) in the // Cloud Map Developer Guide. func (c *Client) CreateHttpNamespace(ctx context.Context, params *CreateHttpNamespaceInput, optFns ...func(*Options)) (*CreateHttpNamespaceOutput, error) { diff --git a/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go b/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go index a787d93bb16..227d2efdd66 100644 --- a/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go +++ b/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go @@ -18,7 +18,8 @@ import ( // the resulting DNS name for the service is backend.example.com. Service instances // that are registered using a private DNS namespace can be discovered using either // a DiscoverInstances request or using DNS. For the current quota on the number of -// namespaces that you can create using the same account, see Cloud Map quotas +// namespaces that you can create using the same Amazon Web Services account, see +// Cloud Map quotas // (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) in the // Cloud Map Developer Guide. func (c *Client) CreatePrivateDnsNamespace(ctx context.Context, params *CreatePrivateDnsNamespaceInput, optFns ...func(*Options)) (*CreatePrivateDnsNamespaceOutput, error) { diff --git a/service/servicediscovery/api_op_CreatePublicDnsNamespace.go b/service/servicediscovery/api_op_CreatePublicDnsNamespace.go index 8e246e81e8e..3e4ef152e15 100644 --- a/service/servicediscovery/api_op_CreatePublicDnsNamespace.go +++ b/service/servicediscovery/api_op_CreatePublicDnsNamespace.go @@ -18,9 +18,10 @@ import ( // the service is backend.example.com. You can discover instances that were // registered with a public DNS namespace by using either a DiscoverInstances // request or using DNS. For the current quota on the number of namespaces that you -// can create using the same account, see Cloud Map quotas +// can create using the same Amazon Web Services account, see Cloud Map quotas // (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) in the -// Cloud Map Developer Guide. +// Cloud Map Developer Guide. The CreatePublicDnsNamespace API operation is not +// supported in the Amazon Web Services GovCloud (US) Regions. func (c *Client) CreatePublicDnsNamespace(ctx context.Context, params *CreatePublicDnsNamespaceInput, optFns ...func(*Options)) (*CreatePublicDnsNamespaceOutput, error) { if params == nil { params = &CreatePublicDnsNamespaceInput{} @@ -38,7 +39,8 @@ func (c *Client) CreatePublicDnsNamespace(ctx context.Context, params *CreatePub type CreatePublicDnsNamespaceInput struct { - // The name that you want to assign to this namespace. + // The name that you want to assign to this namespace. Do not include sensitive + // information in the name. 
The name is publicly available using DNS queries. // // This member is required. Name *string diff --git a/service/servicediscovery/api_op_CreateService.go b/service/servicediscovery/api_op_CreateService.go index e6c5703790d..8314ac1de57 100644 --- a/service/servicediscovery/api_op_CreateService.go +++ b/service/servicediscovery/api_op_CreateService.go @@ -56,25 +56,26 @@ func (c *Client) CreateService(ctx context.Context, params *CreateServiceInput, type CreateServiceInput struct { - // The name that you want to assign to the service. If you want Cloud Map to create - // an SRV record when you register an instance and you're using a system that - // requires a specific SRV format, such as HAProxy (http://www.haproxy.org/), - // specify the following for Name: + // The name that you want to assign to the service. Do not include sensitive + // information in the name if the namespace is discoverable by public DNS queries. + // If you want Cloud Map to create an SRV record when you register an instance and + // you're using a system that requires a specific SRV format, such as HAProxy + // (http://www.haproxy.org/), specify the following for Name: // - // * Start the name with an underscore (_), such - // as _exampleservice. + // * Start the name + // with an underscore (_), such as _exampleservice. // - // * End the name with ._protocol, such as ._tcp. + // * End the name with + // ._protocol, such as ._tcp. // - // When you - // register an instance, Cloud Map creates an SRV record and assigns a name to the - // record by concatenating the service name and the namespace name (for example, - // _exampleservice._tcp.example.com). For services that are accessible by DNS - // queries, you can't create multiple services with names that differ only by case - // (such as EXAMPLE and example). Otherwise, these services have the same DNS name - // and can't be distinguished. However, if you use a namespace that's only - // accessible by API calls, then you can create services that with names that - // differ only by case. + // When you register an instance, Cloud Map creates an + // SRV record and assigns a name to the record by concatenating the service name + // and the namespace name (for example, _exampleservice._tcp.example.com). For + // services that are accessible by DNS queries, you can't create multiple services + // with names that differ only by case (such as EXAMPLE and example). Otherwise, + // these services have the same DNS name and can't be distinguished. However, if + // you use a namespace that's only accessible by API calls, then you can create + // services that with names that differ only by case. // // This member is required. Name *string diff --git a/service/servicediscovery/api_op_ListNamespaces.go b/service/servicediscovery/api_op_ListNamespaces.go index 5e3264fac3e..a10299a511a 100644 --- a/service/servicediscovery/api_op_ListNamespaces.go +++ b/service/servicediscovery/api_op_ListNamespaces.go @@ -13,7 +13,7 @@ import ( ) // Lists summary information about the namespaces that were created by the current -// account. +// Amazon Web Services account. 
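// A minimal sketch of the new NAME filter with the BEGINS_WITH condition when
// listing namespaces, assuming the Filters member of ListNamespacesInput
// (standard in this client, though not part of this diff) accepts a list of
// NamespaceFilter values.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := servicediscovery.NewFromConfig(cfg)

	// List namespaces whose name begins with "prod".
	out, err := client.ListNamespaces(context.TODO(), &servicediscovery.ListNamespacesInput{
		Filters: []types.NamespaceFilter{
			{
				Name:      types.NamespaceFilterNameName,
				Values:    []string{"prod"},
				Condition: types.FilterConditionBeginsWith,
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}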
func (c *Client) ListNamespaces(ctx context.Context, params *ListNamespacesInput, optFns ...func(*Options)) (*ListNamespacesOutput, error) { if params == nil { params = &ListNamespacesInput{} diff --git a/service/servicediscovery/api_op_RegisterInstance.go b/service/servicediscovery/api_op_RegisterInstance.go index 3356a10c63a..9d4f2a32631 100644 --- a/service/servicediscovery/api_op_RegisterInstance.go +++ b/service/servicediscovery/api_op_RegisterInstance.go @@ -71,12 +71,13 @@ type RegisterInstanceInput struct { // // * For each attribute, the applicable value. // - // Supported - // attribute keys include the following: AWS_ALIAS_DNS_NAME If you want Cloud Map - // to create an Amazon Route 53 alias record that routes traffic to an Elastic Load - // Balancing load balancer, specify the DNS name that's associated with the load - // balancer. For information about how to get the DNS name, see "DNSName" in the - // topic AliasTarget + // Do not + // include sensitive information in the attributes if the namespace is discoverable + // by public DNS queries. Supported attribute keys include the following: + // AWS_ALIAS_DNS_NAME If you want Cloud Map to create an Amazon Route 53 alias + // record that routes traffic to an Elastic Load Balancing load balancer, specify + // the DNS name that's associated with the load balancer. For information about how + // to get the DNS name, see "DNSName" in the topic AliasTarget // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html) // in the Route 53 API Reference. Note the following: // @@ -157,6 +158,11 @@ type RegisterInstanceInput struct { // new one. The health check isn't deleted immediately, so it will still appear for // a while if you submit a ListHealthChecks request, for example. // + // Do not include + // sensitive information in InstanceId if the namespace is discoverable by public + // DNS queries and any Type member of DnsRecord for the service contains SRV + // because the InstanceId is discoverable by public DNS queries. + // // This member is required. InstanceId *string diff --git a/service/servicediscovery/types/enums.go b/service/servicediscovery/types/enums.go index 2e4fef3fd73..c02abcdfc2b 100644 --- a/service/servicediscovery/types/enums.go +++ b/service/servicediscovery/types/enums.go @@ -24,9 +24,10 @@ type FilterCondition string // Enum values for FilterCondition const ( - FilterConditionEq FilterCondition = "EQ" - FilterConditionIn FilterCondition = "IN" - FilterConditionBetween FilterCondition = "BETWEEN" + FilterConditionEq FilterCondition = "EQ" + FilterConditionIn FilterCondition = "IN" + FilterConditionBetween FilterCondition = "BETWEEN" + FilterConditionBeginsWith FilterCondition = "BEGINS_WITH" ) // Values returns all known values for FilterCondition. Note that this can be @@ -37,6 +38,7 @@ func (FilterCondition) Values() []FilterCondition { "EQ", "IN", "BETWEEN", + "BEGINS_WITH", } } @@ -106,7 +108,9 @@ type NamespaceFilterName string // Enum values for NamespaceFilterName const ( - NamespaceFilterNameType NamespaceFilterName = "TYPE" + NamespaceFilterNameType NamespaceFilterName = "TYPE" + NamespaceFilterNameName NamespaceFilterName = "NAME" + NamespaceFilterNameHttpName NamespaceFilterName = "HTTP_NAME" ) // Values returns all known values for NamespaceFilterName. 
Note that this can be @@ -115,6 +119,8 @@ const ( func (NamespaceFilterName) Values() []NamespaceFilterName { return []NamespaceFilterName{ "TYPE", + "NAME", + "HTTP_NAME", } } diff --git a/service/servicediscovery/types/types.go b/service/servicediscovery/types/types.go index 4b551863433..af0ff90ae02 100644 --- a/service/servicediscovery/types/types.go +++ b/service/servicediscovery/types/types.go @@ -8,7 +8,9 @@ import ( ) // A complex type that contains information about the Amazon Route 53 DNS records -// that you want Cloud Map to create when you register an instance. +// that you want Cloud Map to create when you register an instance. The record +// types of a service can only be changed by deleting the service and recreating it +// with a new Dnsconfig. type DnsConfig struct { // An array that contains one DnsRecord object for each Route 53 DNS record that @@ -17,7 +19,9 @@ type DnsConfig struct { // This member is required. DnsRecords []DnsRecord - // The ID of the namespace to use for DNS configuration. + // Use NamespaceId in Service + // (https://docs.aws.amazon.com/cloud-map/latest/api/API_Service.html) instead. The + // ID of the namespace to use for DNS configuration. // // Deprecated: Top level attribute in request should be used to reference // namespace-id @@ -424,12 +428,13 @@ type Instance struct { // // * For each attribute, the applicable value. // - // Supported - // attribute keys include the following: AWS_ALIAS_DNS_NAME If you want Cloud Map - // to create a Route 53 alias record that routes traffic to an Elastic Load - // Balancing load balancer, specify the DNS name that's associated with the load - // balancer. For information about how to get the DNS name, see - // AliasTarget->DNSName + // Do not + // include sensitive information in the attributes if the namespace is discoverable + // by public DNS queries. Supported attribute keys include the following: + // AWS_ALIAS_DNS_NAME If you want Cloud Map to create a Route 53 alias record that + // routes traffic to an Elastic Load Balancing load balancer, specify the DNS name + // that's associated with the load balancer. For information about how to get the + // DNS name, see AliasTarget->DNSName // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-DNSName) // in the Route 53 API Reference. Note the following: // @@ -577,23 +582,45 @@ type Namespace struct { // choose to list public or private namespaces. type NamespaceFilter struct { - // Specify TYPE. + // Specify the namespaces that you want to get using one of the following. + // + // * TYPE: + // Gets the namespaces of the specified type. + // + // * NAME: Gets the namespaces with the + // specified name. + // + // * HTTP_NAME: Gets the namespaces with the specified HTTP name. // // This member is required. Name NamespaceFilterName - // If you specify EQ for Condition, specify either DNS_PUBLIC or DNS_PRIVATE. If - // you specify IN for Condition, you can specify DNS_PUBLIC, DNS_PRIVATE, or both. + // Specify the values that are applicable to the value that you specify for + // Name. + // + // * TYPE: Specify HTTP, DNS_PUBLIC, or DNS_PRIVATE. + // + // * NAME: Specify the + // name of the namespace, which is found in Namespace.Name. + // + // * HTTP_NAME: Specify + // the HTTP name of the namespace, which is found in + // Namespace.Properties.HttpProperties.HttpName. // // This member is required. 
Values []string - // The operator that you want to use to determine whether ListNamespaces returns a - // namespace. Valid values for condition include: EQ When you specify EQ for the - // condition, you can choose to list only public namespaces or private namespaces, - // but not both. EQ is the default condition and can be omitted. IN When you - // specify IN for the condition, you can choose to list public namespaces, private - // namespaces, or both. BETWEEN Not applicable + // Specify the operator that you want to use to determine whether a namespace + // matches the specified value. Valid values for Condition are one of the + // following. + // + // * EQ: When you specify EQ for Condition, you can specify only one + // value. EQ is supported for TYPE, NAME, and HTTP_NAME. EQ is the default + // condition and can be omitted. + // + // * BEGINS_WITH: When you specify BEGINS_WITH for + // Condition, you can specify only one value. BEGINS_WITH is supported for TYPE, + // NAME, and HTTP_NAME. Condition FilterCondition noSmithyDocumentSerde @@ -932,7 +959,9 @@ type Service struct { Description *string // A complex type that contains information about the Route 53 DNS records that you - // want Cloud Map to create when you register an instance. + // want Cloud Map to create when you register an instance. The record types of a + // service can only be changed by deleting the service and recreating it with a new + // Dnsconfig. DnsConfig *DnsConfig // Public DNS and HTTP namespaces only. A complex type that contains settings for @@ -1010,12 +1039,6 @@ type ServiceFilter struct { // * EQ: When you // specify EQ, specify one namespace ID for Values. EQ is the default condition and // can be omitted. - // - // * IN: When you specify IN, specify a list of the IDs for the - // namespaces that you want ListServices to return a list of services for. - // - // * - // BETWEEN: Not applicable. Condition FilterCondition noSmithyDocumentSerde diff --git a/service/sesv2/api_op_CreateCustomVerificationEmailTemplate.go b/service/sesv2/api_op_CreateCustomVerificationEmailTemplate.go index 0dc67705d40..2a996abd9b1 100644 --- a/service/sesv2/api_op_CreateCustomVerificationEmailTemplate.go +++ b/service/sesv2/api_op_CreateCustomVerificationEmailTemplate.go @@ -11,9 +11,9 @@ import ( ) // Creates a new custom verification email template. For more information about -// custom verification email templates, see Using Custom Verification Email -// Templates -// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html) +// custom verification email templates, see Using custom verification email +// templates +// (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom) // in the Amazon SES Developer Guide. You can execute this operation no more than // once per second. func (c *Client) CreateCustomVerificationEmailTemplate(ctx context.Context, params *CreateCustomVerificationEmailTemplateInput, optFns ...func(*Options)) (*CreateCustomVerificationEmailTemplateOutput, error) { @@ -53,8 +53,8 @@ type CreateCustomVerificationEmailTemplateInput struct { // The content of the custom verification email. The total size of the email must // be less than 10 MB. The message body may contain HTML, with some limitations. 
- // For more information, see Custom Verification Email Frequently Asked Questions - // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html#custom-verification-emails-faq) + // For more information, see Custom verification email frequently asked questions + // (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom-faq) // in the Amazon SES Developer Guide. // // This member is required. diff --git a/service/sesv2/api_op_CreateDedicatedIpPool.go b/service/sesv2/api_op_CreateDedicatedIpPool.go index 8e7468f8cce..5d5b7ff5b29 100644 --- a/service/sesv2/api_op_CreateDedicatedIpPool.go +++ b/service/sesv2/api_op_CreateDedicatedIpPool.go @@ -39,6 +39,9 @@ type CreateDedicatedIpPoolInput struct { // This member is required. PoolName *string + // The type of scaling mode. + ScalingMode types.ScalingMode + // An object that defines the tags (keys and values) that you want to associate // with the pool. Tags []types.Tag diff --git a/service/sesv2/api_op_DeleteCustomVerificationEmailTemplate.go b/service/sesv2/api_op_DeleteCustomVerificationEmailTemplate.go index 69719a97c89..d7a8a65068b 100644 --- a/service/sesv2/api_op_DeleteCustomVerificationEmailTemplate.go +++ b/service/sesv2/api_op_DeleteCustomVerificationEmailTemplate.go @@ -11,9 +11,9 @@ import ( ) // Deletes an existing custom verification email template. For more information -// about custom verification email templates, see Using Custom Verification Email -// Templates -// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html) +// about custom verification email templates, see Using custom verification email +// templates +// (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom) // in the Amazon SES Developer Guide. You can execute this operation no more than // once per second. func (c *Client) DeleteCustomVerificationEmailTemplate(ctx context.Context, params *DeleteCustomVerificationEmailTemplateInput, optFns ...func(*Options)) (*DeleteCustomVerificationEmailTemplateOutput, error) { diff --git a/service/sesv2/api_op_GetCustomVerificationEmailTemplate.go b/service/sesv2/api_op_GetCustomVerificationEmailTemplate.go index 0cc3f8a4850..e5e1b466ebc 100644 --- a/service/sesv2/api_op_GetCustomVerificationEmailTemplate.go +++ b/service/sesv2/api_op_GetCustomVerificationEmailTemplate.go @@ -12,8 +12,8 @@ import ( // Returns the custom email verification template for the template name you // specify. For more information about custom verification email templates, see -// Using Custom Verification Email Templates -// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html) +// Using custom verification email templates +// (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom) // in the Amazon SES Developer Guide. You can execute this operation no more than // once per second. func (c *Client) GetCustomVerificationEmailTemplate(ctx context.Context, params *GetCustomVerificationEmailTemplateInput, optFns ...func(*Options)) (*GetCustomVerificationEmailTemplateOutput, error) { diff --git a/service/sesv2/api_op_GetDedicatedIpPool.go b/service/sesv2/api_op_GetDedicatedIpPool.go new file mode 100644 index 00000000000..e0160f206d7 --- /dev/null +++ b/service/sesv2/api_op_GetDedicatedIpPool.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sesv2 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sesv2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieve information about the dedicated pool. +func (c *Client) GetDedicatedIpPool(ctx context.Context, params *GetDedicatedIpPoolInput, optFns ...func(*Options)) (*GetDedicatedIpPoolOutput, error) { + if params == nil { + params = &GetDedicatedIpPoolInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDedicatedIpPool", params, optFns, c.addOperationGetDedicatedIpPoolMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetDedicatedIpPoolOutput) + out.ResultMetadata = metadata + return out, nil +} + +// A request to obtain more information about a dedicated IP pool. +type GetDedicatedIpPoolInput struct { + + // The name of the dedicated IP pool to retrieve. + // + // This member is required. + PoolName *string + + noSmithyDocumentSerde +} + +// The following element is returned by the service. +type GetDedicatedIpPoolOutput struct { + + // An object that contains information about a dedicated IP pool. + DedicatedIpPool *types.DedicatedIpPool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDedicatedIpPoolMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetDedicatedIpPool{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetDedicatedIpPool{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetDedicatedIpPoolValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDedicatedIpPool(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDedicatedIpPool(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ses", 
+ OperationName: "GetDedicatedIpPool", + } +} diff --git a/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go b/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go index 4f1347218cc..deade1483e3 100644 --- a/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go +++ b/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go @@ -73,11 +73,10 @@ type GetDeliverabilityDashboardOptionsOutput struct { // to expire at the end of the current calendar month. PendingExpirationSubscribedDomains []types.DomainDeliverabilityTrackingOption - // The date, in Unix time format, when your current subscription to the - // Deliverability dashboard is scheduled to expire, if your subscription is - // scheduled to expire at the end of the current calendar month. This value is null - // if you have an active subscription that isn’t due to expire at the end of the - // month. + // The date when your current subscription to the Deliverability dashboard is + // scheduled to expire, if your subscription is scheduled to expire at the end of + // the current calendar month. This value is null if you have an active + // subscription that isn’t due to expire at the end of the month. SubscriptionExpiryDate *time.Time // Metadata pertaining to the operation's result. diff --git a/service/sesv2/api_op_GetEmailIdentity.go b/service/sesv2/api_op_GetEmailIdentity.go index 8e60d2b8e03..9217856c45d 100644 --- a/service/sesv2/api_op_GetEmailIdentity.go +++ b/service/sesv2/api_op_GetEmailIdentity.go @@ -74,6 +74,25 @@ type GetEmailIdentityOutput struct { // with the email identity. Tags []types.Tag + // The verification status of the identity. The status can be one of the + // following: + // + // * PENDING – The verification process was initiated, but Amazon SES + // hasn't yet been able to verify the identity. + // + // * SUCCESS – The verification + // process completed successfully. + // + // * FAILED – The verification process failed. + // + // * + // TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining + // the verification status of the identity. + // + // * NOT_STARTED – The verification + // process hasn't been initiated for the identity. + VerificationStatus types.VerificationStatus + // Specifies whether or not the identity is verified. You can only send email from // verified email addresses or domains. For more information about verifying // identities, see the Amazon Pinpoint User Guide diff --git a/service/sesv2/api_op_ListCustomVerificationEmailTemplates.go b/service/sesv2/api_op_ListCustomVerificationEmailTemplates.go index 6162b75e73b..5d76964f983 100644 --- a/service/sesv2/api_op_ListCustomVerificationEmailTemplates.go +++ b/service/sesv2/api_op_ListCustomVerificationEmailTemplates.go @@ -14,8 +14,8 @@ import ( // Lists the existing custom verification email templates for your account in the // current Amazon Web Services Region. For more information about custom -// verification email templates, see Using Custom Verification Email Templates -// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html) +// verification email templates, see Using custom verification email templates +// (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom) // in the Amazon SES Developer Guide. You can execute this operation no more than // once per second. 
func (c *Client) ListCustomVerificationEmailTemplates(ctx context.Context, params *ListCustomVerificationEmailTemplatesInput, optFns ...func(*Options)) (*ListCustomVerificationEmailTemplatesOutput, error) { diff --git a/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go b/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go index 2a4758bf1be..70356a5f743 100644 --- a/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go +++ b/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go @@ -36,15 +36,13 @@ func (c *Client) ListDomainDeliverabilityCampaigns(ctx context.Context, params * // only if you enabled the Deliverability dashboard. type ListDomainDeliverabilityCampaignsInput struct { - // The last day, in Unix time format, that you want to obtain deliverability data - // for. This value has to be less than or equal to 30 days after the value of the - // StartDate parameter. + // The last day that you want to obtain deliverability data for. This value has to + // be less than or equal to 30 days after the value of the StartDate parameter. // // This member is required. EndDate *time.Time - // The first day, in Unix time format, that you want to obtain deliverability data - // for. + // The first day that you want to obtain deliverability data for. // // This member is required. StartDate *time.Time diff --git a/service/sesv2/api_op_ListSuppressedDestinations.go b/service/sesv2/api_op_ListSuppressedDestinations.go index 6dc663cf5e9..1cb2987c378 100644 --- a/service/sesv2/api_op_ListSuppressedDestinations.go +++ b/service/sesv2/api_op_ListSuppressedDestinations.go @@ -35,8 +35,7 @@ func (c *Client) ListSuppressedDestinations(ctx context.Context, params *ListSup type ListSuppressedDestinationsInput struct { // Used to filter the list of suppressed email destinations so that it only - // includes addresses that were added to the list before a specific date. The date - // that you specify should be in Unix time format. + // includes addresses that were added to the list before a specific date. EndDate *time.Time // A token returned from a previous call to ListSuppressedDestinations to indicate @@ -53,8 +52,7 @@ type ListSuppressedDestinationsInput struct { Reasons []types.SuppressionListReason // Used to filter the list of suppressed email destinations so that it only - // includes addresses that were added to the list after a specific date. The date - // that you specify should be in Unix time format. + // includes addresses that were added to the list after a specific date. StartDate *time.Time noSmithyDocumentSerde diff --git a/service/sesv2/api_op_SendCustomVerificationEmail.go b/service/sesv2/api_op_SendCustomVerificationEmail.go index 0186e8cb9b9..3e34509ffba 100644 --- a/service/sesv2/api_op_SendCustomVerificationEmail.go +++ b/service/sesv2/api_op_SendCustomVerificationEmail.go @@ -15,9 +15,9 @@ import ( // executing this operation, a customized verification email is sent to the // specified address. To use this operation, you must first create a custom // verification email template. For more information about creating and using -// custom verification email templates, see Using Custom Verification Email -// Templates -// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html) +// custom verification email templates, see Using custom verification email +// templates +// (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom) // in the Amazon SES Developer Guide. 
You can execute this operation no more than // once per second. func (c *Client) SendCustomVerificationEmail(ctx context.Context, params *SendCustomVerificationEmailInput, optFns ...func(*Options)) (*SendCustomVerificationEmailOutput, error) { diff --git a/service/sesv2/api_op_UpdateCustomVerificationEmailTemplate.go b/service/sesv2/api_op_UpdateCustomVerificationEmailTemplate.go index 6185cb3ad61..70586f510b1 100644 --- a/service/sesv2/api_op_UpdateCustomVerificationEmailTemplate.go +++ b/service/sesv2/api_op_UpdateCustomVerificationEmailTemplate.go @@ -11,9 +11,9 @@ import ( ) // Updates an existing custom verification email template. For more information -// about custom verification email templates, see Using Custom Verification Email -// Templates -// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html) +// about custom verification email templates, see Using custom verification email +// templates +// (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom) // in the Amazon SES Developer Guide. You can execute this operation no more than // once per second. func (c *Client) UpdateCustomVerificationEmailTemplate(ctx context.Context, params *UpdateCustomVerificationEmailTemplateInput, optFns ...func(*Options)) (*UpdateCustomVerificationEmailTemplateOutput, error) { @@ -53,8 +53,8 @@ type UpdateCustomVerificationEmailTemplateInput struct { // The content of the custom verification email. The total size of the email must // be less than 10 MB. The message body may contain HTML, with some limitations. - // For more information, see Custom Verification Email Frequently Asked Questions - // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html#custom-verification-emails-faq) + // For more information, see Custom verification email frequently asked questions + // (https://docs.aws.amazon.com/ses/latest/dg/creating-identities.html#send-email-verify-address-custom-faq) // in the Amazon SES Developer Guide. // // This member is required. 
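The SESv2 portions of this change set add a ScalingMode field to CreateDedicatedIpPool, a new GetDedicatedIpPool operation, and a VerificationStatus field on GetEmailIdentity. The following minimal sketch shows how a caller might exercise these additions; it is illustrative only and not part of the generated patch. The pool name "example-managed-pool" and the identity "sender@example.com" are placeholder values, and the program assumes credentials and a region are available to config.LoadDefaultConfig.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
	"github.com/aws/aws-sdk-go-v2/service/sesv2/types"
)

func main() {
	ctx := context.Background()

	// Assumes default credentials and region are configured in the environment.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sesv2.NewFromConfig(cfg)

	// Create a dedicated IP pool whose IPs are managed by Amazon SES, using the
	// new ScalingMode field ("example-managed-pool" is a placeholder name).
	if _, err := client.CreateDedicatedIpPool(ctx, &sesv2.CreateDedicatedIpPoolInput{
		PoolName:    aws.String("example-managed-pool"),
		ScalingMode: types.ScalingModeManaged,
	}); err != nil {
		log.Fatal(err)
	}

	// Read the pool back with the new GetDedicatedIpPool operation.
	pool, err := client.GetDedicatedIpPool(ctx, &sesv2.GetDedicatedIpPoolInput{
		PoolName: aws.String("example-managed-pool"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pool:", aws.ToString(pool.DedicatedIpPool.PoolName),
		"scaling mode:", pool.DedicatedIpPool.ScalingMode)

	// Inspect the new VerificationStatus field returned by GetEmailIdentity
	// ("sender@example.com" is a placeholder identity).
	identity, err := client.GetEmailIdentity(ctx, &sesv2.GetEmailIdentityInput{
		EmailIdentity: aws.String("sender@example.com"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("verification status:", identity.VerificationStatus)
}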
diff --git a/service/sesv2/deserializers.go b/service/sesv2/deserializers.go index 2cee770cb31..7682ce11b80 100644 --- a/service/sesv2/deserializers.go +++ b/service/sesv2/deserializers.go @@ -3675,6 +3675,159 @@ func awsRestjson1_deserializeOpDocumentGetDedicatedIpOutput(v **GetDedicatedIpOu return nil } +type awsRestjson1_deserializeOpGetDedicatedIpPool struct { +} + +func (*awsRestjson1_deserializeOpGetDedicatedIpPool) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetDedicatedIpPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetDedicatedIpPool(response, &metadata) + } + output := &GetDedicatedIpPoolOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetDedicatedIpPoolOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetDedicatedIpPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return 
awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetDedicatedIpPoolOutput(v **GetDedicatedIpPoolOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetDedicatedIpPoolOutput + if *v == nil { + sv = &GetDedicatedIpPoolOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DedicatedIpPool": + if err := awsRestjson1_deserializeDocumentDedicatedIpPool(&sv.DedicatedIpPool, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpGetDedicatedIps struct { } @@ -4703,6 +4856,15 @@ func awsRestjson1_deserializeOpDocumentGetEmailIdentityOutput(v **GetEmailIdenti return err } + case "VerificationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected VerificationStatus to be of type string, got %T instead", value) + } + sv.VerificationStatus = types.VerificationStatus(jtv) + } + case "VerifiedForSendingStatus": if value != nil { jtv, ok := value.(bool) @@ -12021,6 +12183,55 @@ func awsRestjson1_deserializeDocumentDedicatedIpList(v *[]types.DedicatedIp, val return nil } +func awsRestjson1_deserializeDocumentDedicatedIpPool(v **types.DedicatedIpPool, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DedicatedIpPool + if *v == nil { + sv = &types.DedicatedIpPool{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PoolName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PoolName to be of type string, got %T instead", value) + } + sv.PoolName = ptr.String(jtv) + } + + case "ScalingMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScalingMode to be of type string, got %T instead", value) + } + sv.ScalingMode = types.ScalingMode(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentDeliverabilityTestReport(v **types.DeliverabilityTestReport, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13302,6 +13513,15 @@ func awsRestjson1_deserializeDocumentIdentityInfo(v **types.IdentityInfo, value sv.SendingEnabled = jtv } + case "VerificationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected VerificationStatus to be of type string, got %T instead", value) + } + sv.VerificationStatus = types.VerificationStatus(jtv) + } + default: _, _ = key, value @@ -13473,6 +13693,19 @@ func awsRestjson1_deserializeDocumentImportJobSummary(v **types.ImportJobSummary } } + case "FailedRecordsCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected FailedRecordsCount to be 
json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.FailedRecordsCount = ptr.Int32(int32(i64)) + } + case "ImportDestination": if err := awsRestjson1_deserializeDocumentImportDestination(&sv.ImportDestination, value); err != nil { return err @@ -13496,6 +13729,19 @@ func awsRestjson1_deserializeDocumentImportJobSummary(v **types.ImportJobSummary sv.JobStatus = types.JobStatus(jtv) } + case "ProcessedRecordsCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ProcessedRecordsCount to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ProcessedRecordsCount = ptr.Int32(int32(i64)) + } + default: _, _ = key, value diff --git a/service/sesv2/generated.json b/service/sesv2/generated.json index 48026b2c48f..c1ee9374e4e 100644 --- a/service/sesv2/generated.json +++ b/service/sesv2/generated.json @@ -37,6 +37,7 @@ "api_op_GetContactList.go", "api_op_GetCustomVerificationEmailTemplate.go", "api_op_GetDedicatedIp.go", + "api_op_GetDedicatedIpPool.go", "api_op_GetDedicatedIps.go", "api_op_GetDeliverabilityDashboardOptions.go", "api_op_GetDeliverabilityTestReport.go", diff --git a/service/sesv2/serializers.go b/service/sesv2/serializers.go index 21712d729c4..228c81ac14b 100644 --- a/service/sesv2/serializers.go +++ b/service/sesv2/serializers.go @@ -556,6 +556,11 @@ func awsRestjson1_serializeOpDocumentCreateDedicatedIpPoolInput(v *CreateDedicat ok.String(*v.PoolName) } + if len(v.ScalingMode) > 0 { + ok := object.Key("ScalingMode") + ok.String(string(v.ScalingMode)) + } + if v.Tags != nil { ok := object.Key("Tags") if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil { @@ -2046,6 +2051,64 @@ func awsRestjson1_serializeOpHttpBindingsGetDedicatedIpInput(v *GetDedicatedIpIn return nil } +type awsRestjson1_serializeOpGetDedicatedIpPool struct { +} + +func (*awsRestjson1_serializeOpGetDedicatedIpPool) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetDedicatedIpPool) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDedicatedIpPoolInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v2/email/dedicated-ip-pools/{PoolName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetDedicatedIpPoolInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestjson1_serializeOpHttpBindingsGetDedicatedIpPoolInput(v *GetDedicatedIpPoolInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.PoolName == nil || len(*v.PoolName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member PoolName must not be empty")} + } + if v.PoolName != nil { + if err := encoder.SetURI("PoolName").String(*v.PoolName); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpGetDedicatedIps struct { } diff --git a/service/sesv2/types/enums.go b/service/sesv2/types/enums.go index 0b28cb46dfd..fe78865395c 100644 --- a/service/sesv2/types/enums.go +++ b/service/sesv2/types/enums.go @@ -391,6 +391,24 @@ func (ReviewStatus) Values() []ReviewStatus { } } +type ScalingMode string + +// Enum values for ScalingMode +const ( + ScalingModeStandard ScalingMode = "STANDARD" + ScalingModeManaged ScalingMode = "MANAGED" +) + +// Values returns all known values for ScalingMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (ScalingMode) Values() []ScalingMode { + return []ScalingMode{ + "STANDARD", + "MANAGED", + } +} + type SubscriptionStatus string // Enum values for SubscriptionStatus @@ -463,6 +481,30 @@ func (TlsPolicy) Values() []TlsPolicy { } } +type VerificationStatus string + +// Enum values for VerificationStatus +const ( + VerificationStatusPending VerificationStatus = "PENDING" + VerificationStatusSuccess VerificationStatus = "SUCCESS" + VerificationStatusFailed VerificationStatus = "FAILED" + VerificationStatusTemporaryFailure VerificationStatus = "TEMPORARY_FAILURE" + VerificationStatusNotStarted VerificationStatus = "NOT_STARTED" +) + +// Values returns all known values for VerificationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (VerificationStatus) Values() []VerificationStatus { + return []VerificationStatus{ + "PENDING", + "SUCCESS", + "FAILED", + "TEMPORARY_FAILURE", + "NOT_STARTED", + } +} + type WarmupStatus string // Enum values for WarmupStatus diff --git a/service/sesv2/types/types.go b/service/sesv2/types/types.go index a773d1fae31..620ceec3aeb 100644 --- a/service/sesv2/types/types.go +++ b/service/sesv2/types/types.go @@ -49,7 +49,7 @@ type BlacklistEntry struct { // blacklist maintainer. Description *string - // The time when the blacklisting event occurred, shown in Unix time format. + // The time when the blacklisting event occurred. ListingTime *time.Time // The name of the blacklist that the IP address appears on. @@ -202,11 +202,11 @@ type CloudWatchDimensionConfiguration struct { // don't provide the value of the dimension when you send an email. This value has // to meet the following criteria: // - // * It can only contain ASCII letters (a–z, A–Z), - // numbers (0–9), underscores (_), or dashes (-). + // * Can only contain ASCII letters (a–z, A–Z), + // numbers (0–9), underscores (_), or dashes (-), at signs (@), and periods (.). // - // * It can contain no more than - // 256 characters. + // * + // It can contain no more than 256 characters. // // This member is required. DefaultDimensionValue *string @@ -388,11 +388,33 @@ type DedicatedIp struct { noSmithyDocumentSerde } +// Contains information about a dedicated IP pool. 
+type DedicatedIpPool struct { + + // The name of the dedicated IP pool. + // + // This member is required. + PoolName *string + + // The type of the dedicated IP pool. + // + // * STANDARD – A dedicated IP pool where the + // customer can control which IPs are part of the pool. + // + // * MANAGED – A dedicated IP + // pool where the reputation and number of IPs is automatically managed by Amazon + // SES. + // + // This member is required. + ScalingMode ScalingMode + + noSmithyDocumentSerde +} + // An object that contains metadata related to a predictive inbox placement test. type DeliverabilityTestReport struct { - // The date and time when the predictive inbox placement test was created, in Unix - // time format. + // The date and time when the predictive inbox placement test was created. CreateDate *time.Time // The status of the predictive inbox placement test. If the status is IN_PROGRESS, @@ -574,9 +596,9 @@ type DomainDeliverabilityCampaign struct { // The major email providers who handled the email message. Esps []string - // The first time, in Unix time format, when the email message was delivered to any - // recipient's inbox. This value can help you determine how long it took for a - // campaign to deliver an email message. + // The first time when the email message was delivered to any recipient's inbox. + // This value can help you determine how long it took for a campaign to deliver an + // email message. FirstSeenDateTime *time.Time // The verified email address that the email message was sent from. @@ -588,9 +610,9 @@ type DomainDeliverabilityCampaign struct { // The number of email messages that were delivered to recipients’ inboxes. InboxCount *int64 - // The last time, in Unix time format, when the email message was delivered to any - // recipient's inbox. This value can help you determine how long it took for a - // campaign to deliver an email message. + // The last time when the email message was delivered to any recipient's inbox. + // This value can help you determine how long it took for a campaign to deliver an + // email message. LastSeenDateTime *time.Time // The projected number of recipients that the email message was sent to. @@ -634,8 +656,7 @@ type DomainDeliverabilityTrackingOption struct { // the domain. InboxPlacementTrackingOption *InboxPlacementTrackingOption - // The date, in Unix time format, when you enabled the Deliverability dashboard for - // the domain. + // The date when you enabled the Deliverability dashboard for the domain. SubscriptionStartDate *time.Time noSmithyDocumentSerde @@ -860,6 +881,25 @@ type IdentityInfo struct { // authorize Amazon SES to send email from that identity. SendingEnabled bool + // The verification status of the identity. The status can be one of the + // following: + // + // * PENDING – The verification process was initiated, but Amazon SES + // hasn't yet been able to verify the identity. + // + // * SUCCESS – The verification + // process completed successfully. + // + // * FAILED – The verification process failed. + // + // * + // TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining + // the verification status of the identity. + // + // * NOT_STARTED – The verification + // process hasn't been initiated for the identity. + VerificationStatus VerificationStatus + noSmithyDocumentSerde } @@ -898,6 +938,10 @@ type ImportJobSummary struct { // The date and time when the import job was created. 
CreatedTimestamp *time.Time + // The number of records that failed processing because of invalid input or other + // reasons. + FailedRecordsCount *int32 + // An object that contains details about the resource destination the import job is // going to target. ImportDestination *ImportDestination @@ -908,6 +952,9 @@ type ImportJobSummary struct { // The status of the import job. JobStatus JobStatus + // The current number of records processed. + ProcessedRecordsCount *int32 + noSmithyDocumentSerde } @@ -991,12 +1038,12 @@ type ListManagementOptions struct { type MailFromAttributes struct { // The action to take if the required MX record can't be found when you send an - // email. When you set this value to UseDefaultValue, the mail is sent using - // amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, - // the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't - // attempt to deliver the email. These behaviors are taken when the custom MAIL - // FROM domain configuration is in the Pending, Failed, and TemporaryFailure - // states. + // email. When you set this value to USE_DEFAULT_VALUE, the mail is sent using + // amazonses.com as the MAIL FROM domain. When you set this value to + // REJECT_MESSAGE, the Amazon SES API v2 returns a MailFromDomainNotVerified error, + // and doesn't attempt to deliver the email. These behaviors are taken when the + // custom MAIL FROM domain configuration is in the Pending, Failed, and + // TemporaryFailure states. // // This member is required. BehaviorOnMxFailure BehaviorOnMxFailure @@ -1250,8 +1297,8 @@ type SendingOptions struct { type SendQuota struct { // The maximum number of emails that you can send in the current Amazon Web - // Services Region over a 24-hour period. This value is also called your sending - // quota. + // Services Region over a 24-hour period. A value of -1 signifies an unlimited + // quota. (This value is also referred to as your sending quota.) 
Max24HourSend float64 // The maximum number of emails that you can send per second in the current Amazon diff --git a/service/sesv2/validators.go b/service/sesv2/validators.go index 036ad7e0890..3b135ccf52b 100644 --- a/service/sesv2/validators.go +++ b/service/sesv2/validators.go @@ -570,6 +570,26 @@ func (m *validateOpGetDedicatedIp) HandleInitialize(ctx context.Context, in midd return next.HandleInitialize(ctx, in) } +type validateOpGetDedicatedIpPool struct { +} + +func (*validateOpGetDedicatedIpPool) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDedicatedIpPool) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetDedicatedIpPoolInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetDedicatedIpPoolInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetDeliverabilityTestReport struct { } @@ -1442,6 +1462,10 @@ func addOpGetDedicatedIpValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetDedicatedIp{}, middleware.After) } +func addOpGetDedicatedIpPoolValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetDedicatedIpPool{}, middleware.After) +} + func addOpGetDeliverabilityTestReportValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetDeliverabilityTestReport{}, middleware.After) } @@ -2621,6 +2645,21 @@ func validateOpGetDedicatedIpInput(v *GetDedicatedIpInput) error { } } +func validateOpGetDedicatedIpPoolInput(v *GetDedicatedIpPoolInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDedicatedIpPoolInput"} + if v.PoolName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PoolName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetDeliverabilityTestReportInput(v *GetDeliverabilityTestReportInput) error { if v == nil { return nil