From 16794f5ed38fcc69383795b51cd064972e7c4da9 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Mon, 23 May 2022 18:14:22 +0000 Subject: [PATCH] Regenerated Clients --- .../312585591bc54dd38f573d234493b607.json | 8 + .../49c440fe570849afab6c336fbe83e0af.json | 8 + .../cc8dd68dfa4441f6b2ec389f87df4799.json | 8 + .../elasticache/api_op_CreateCacheCluster.go | 64 +- .../api_op_CreateReplicationGroup.go | 68 +- .../api_op_DescribeReservedCacheNodes.go | 68 +- ..._op_DescribeReservedCacheNodesOfferings.go | 68 +- service/elasticache/serializers.go | 5 + service/elasticache/types/types.go | 340 ++- .../forecast/api_op_CreateAutoPredictor.go | 37 +- service/forecast/api_op_CreateDataset.go | 35 +- service/forecast/api_op_CreateDatasetGroup.go | 33 +- .../forecast/api_op_CreateDatasetImportJob.go | 27 +- .../forecast/api_op_CreateExplainability.go | 2 +- service/forecast/api_op_CreateForecast.go | 4 +- service/forecast/api_op_CreateMonitor.go | 134 + service/forecast/api_op_DeleteDataset.go | 12 +- service/forecast/api_op_DeleteDatasetGroup.go | 11 +- .../forecast/api_op_DeleteDatasetImportJob.go | 9 +- service/forecast/api_op_DeleteMonitor.go | 117 + .../forecast/api_op_DescribeAutoPredictor.go | 3 + service/forecast/api_op_DescribeDataset.go | 29 +- .../forecast/api_op_DescribeDatasetGroup.go | 27 +- .../api_op_DescribeDatasetImportJob.go | 1 + service/forecast/api_op_DescribeMonitor.go | 171 ++ service/forecast/api_op_ListDatasetGroups.go | 11 +- .../forecast/api_op_ListDatasetImportJobs.go | 5 +- service/forecast/api_op_ListDatasets.go | 11 +- .../forecast/api_op_ListExplainabilities.go | 92 + .../api_op_ListExplainabilityExports.go | 93 + .../forecast/api_op_ListMonitorEvaluations.go | 257 ++ service/forecast/api_op_ListMonitors.go | 241 ++ service/forecast/api_op_ResumeResource.go | 116 + service/forecast/api_op_UpdateDatasetGroup.go | 4 +- service/forecast/deserializers.go | 2250 +++++++++++++++-- service/forecast/generated.json | 6 + service/forecast/serializers.go | 462 ++++ service/forecast/types/types.go | 260 +- service/forecast/validators.go | 269 ++ .../personalize/api_op_CreateRecommender.go | 10 +- .../personalize/api_op_DescribeRecommender.go | 10 +- .../personalize/api_op_GetSolutionMetrics.go | 4 +- service/personalize/deserializers.go | 8 + service/personalize/types/types.go | 18 +- .../proton/internal/endpoints/endpoints.go | 17 + .../internal/endpoints/endpoints.go | 56 + 46 files changed, 4781 insertions(+), 708 deletions(-) create mode 100644 .changelog/312585591bc54dd38f573d234493b607.json create mode 100644 .changelog/49c440fe570849afab6c336fbe83e0af.json create mode 100644 .changelog/cc8dd68dfa4441f6b2ec389f87df4799.json create mode 100644 service/forecast/api_op_CreateMonitor.go create mode 100644 service/forecast/api_op_DeleteMonitor.go create mode 100644 service/forecast/api_op_DescribeMonitor.go create mode 100644 service/forecast/api_op_ListMonitorEvaluations.go create mode 100644 service/forecast/api_op_ListMonitors.go create mode 100644 service/forecast/api_op_ResumeResource.go diff --git a/.changelog/312585591bc54dd38f573d234493b607.json b/.changelog/312585591bc54dd38f573d234493b607.json new file mode 100644 index 00000000000..9a385d86438 --- /dev/null +++ b/.changelog/312585591bc54dd38f573d234493b607.json @@ -0,0 +1,8 @@ +{ + "id": "31258559-1bc5-4dd3-8f57-3d234493b607", + "type": "feature", + "description": "Adding modelMetrics as part of DescribeRecommender API response for Personalize.", + "modules": [ + "service/personalize" + ] +} \ No 
newline at end of file diff --git a/.changelog/49c440fe570849afab6c336fbe83e0af.json b/.changelog/49c440fe570849afab6c336fbe83e0af.json new file mode 100644 index 00000000000..752c009989d --- /dev/null +++ b/.changelog/49c440fe570849afab6c336fbe83e0af.json @@ -0,0 +1,8 @@ +{ + "id": "49c440fe-5708-49af-ab6c-336fbe83e0af", + "type": "feature", + "description": "New APIs for Monitor that help you understand how your predictors perform over time.", + "modules": [ + "service/forecast" + ] +} \ No newline at end of file diff --git a/.changelog/cc8dd68dfa4441f6b2ec389f87df4799.json b/.changelog/cc8dd68dfa4441f6b2ec389f87df4799.json new file mode 100644 index 00000000000..8c73893ca96 --- /dev/null +++ b/.changelog/cc8dd68dfa4441f6b2ec389f87df4799.json @@ -0,0 +1,8 @@ +{ + "id": "cc8dd68d-fa44-41f6-b2ec-389f87df4799", + "type": "feature", + "description": "Added support for encryption in transit for Memcached clusters. Customers can now launch Memcached cluster with encryption in transit enabled when using Memcached version 1.6.12 or later.", + "modules": [ + "service/elasticache" + ] +} \ No newline at end of file diff --git a/service/elasticache/api_op_CreateCacheCluster.go b/service/elasticache/api_op_CreateCacheCluster.go index 3187df4e253..9e7a72f5690 100644 --- a/service/elasticache/api_op_CreateCacheCluster.go +++ b/service/elasticache/api_op_CreateCacheCluster.go @@ -98,54 +98,49 @@ type CreateCacheClusterInput struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized: + // * Memory optimized: // - // * Current generation: R6g node types (available only for - // Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 - // onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g + // node types (available only for Redis engine version 5.0.6 onward and for + // Memcached engine version 1.5.16 onward). 
cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge For region availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. - // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The name of the parameter group to associate with this cluster. If this argument @@ -276,6 +271,13 @@ type CreateCacheClusterInput struct { // A list of tags to be added to this resource. Tags []types.Tag + // A flag that enables in-transit encryption when set to true. You cannot modify + // the value of TransitEncryptionEnabled after the cluster is created. To enable + // in-transit encryption on a cluster you must set TransitEncryptionEnabled to true + // when you create a cluster. Required: Only available when creating a cache + // cluster in an Amazon VPC using Memcached version 1.6.12 or later. + TransitEncryptionEnabled *bool + noSmithyDocumentSerde } diff --git a/service/elasticache/api_op_CreateReplicationGroup.go b/service/elasticache/api_op_CreateReplicationGroup.go index bfa1efc32b9..c22d9545865 100644 --- a/service/elasticache/api_op_CreateReplicationGroup.go +++ b/service/elasticache/api_op_CreateReplicationGroup.go @@ -142,61 +142,57 @@ type CreateReplicationGroupInput struct { // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) 
- // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. - // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. 
+ // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The name of the parameter group to associate with this replication group. If diff --git a/service/elasticache/api_op_DescribeReservedCacheNodes.go b/service/elasticache/api_op_DescribeReservedCacheNodes.go index c5855b8f3ae..d91ae077c8d 100644 --- a/service/elasticache/api_op_DescribeReservedCacheNodes.go +++ b/service/elasticache/api_op_DescribeReservedCacheNodes.go @@ -56,61 +56,57 @@ type DescribeReservedCacheNodesInput struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. 
- // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The duration filter value, specified in years or seconds. Use this parameter to diff --git a/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go b/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go index cccf7345cd1..958dbe0d442 100644 --- a/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go +++ b/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go @@ -55,61 +55,57 @@ type DescribeReservedCacheNodesOfferingsInput struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). 
cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. - // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // Duration filter value, specified in years or seconds. 
Use this parameter to show diff --git a/service/elasticache/serializers.go b/service/elasticache/serializers.go index 95ba9e6e09c..6f378b6c13e 100644 --- a/service/elasticache/serializers.go +++ b/service/elasticache/serializers.go @@ -5192,6 +5192,11 @@ func awsAwsquery_serializeOpDocumentCreateCacheClusterInput(v *CreateCacheCluste } } + if v.TransitEncryptionEnabled != nil { + objectKey := object.Key("TransitEncryptionEnabled") + objectKey.Boolean(*v.TransitEncryptionEnabled) + } + return nil } diff --git a/service/elasticache/types/types.go b/service/elasticache/types/types.go index 561b8d3dcae..85668393f16 100644 --- a/service/elasticache/types/types.go +++ b/service/elasticache/types/types.go @@ -88,61 +88,57 @@ type CacheCluster struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. 
- // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // A list of cache nodes that are members of the cluster. @@ -297,61 +293,57 @@ type CacheEngineVersion struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // -// * Previous generation: (not recommended. Existing clusters are -// still supported but creation of new clusters is not supported for these types.) -// T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, -// cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, -// cache.m3.xlarge, cache.m3.2xlarge +// * Previous generation: (not recommended) T1 node types: +// cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, +// cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, +// cache.m3.2xlarge // // * Compute optimized: // -// * Previous generation: -// (not recommended. Existing clusters are still supported but creation of new -// clusters is not supported for these types.) C1 node types: cache.c1.xlarge +// * Previous generation: (not recommended) +// C1 node types: cache.c1.xlarge // -// * -// Memory optimized with data tiering: +// * Memory optimized with data tiering: // -// * Current generation: R6gd node types -// (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, -// cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, -// cache.r6gd.16xlarge +// * Current +// generation: R6gd node types (available only for Redis engine version 6.2 +// onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, +// cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // -// * Memory optimized: +// * Memory +// optimized: // -// * Current generation: R6g node types -// (available only for Redis engine version 5.0.6 onward and for Memcached engine -// version 1.5.16 onward). 
cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, -// cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For -// region availability, see Supported Node Types +// * Current generation: R6g node types (available only for Redis +// engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). +// cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, +// cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region +// availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // -// * Previous generation: (not recommended. -// Existing clusters are still supported but creation of new clusters is not -// supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, -// cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, -// cache.r3.2xlarge, +// * Previous generation: (not recommended) M2 +// node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: +// cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // -// cache.r3.4xlarge, cache.r3.8xlarge +// cache.r3.4xlarge, +// cache.r3.8xlarge // -// Additional node type -// info +// Additional node type info // -// * All current generation instance types are created in Amazon VPC by -// default. +// * All current generation instance +// types are created in Amazon VPC by default. // -// * Redis append-only files (AOF) are not supported for T1 or T2 -// instances. +// * Redis append-only files (AOF) are +// not supported for T1 or T2 instances. // -// * Redis Multi-AZ with automatic failover is not supported on T1 -// instances. +// * Redis Multi-AZ with automatic failover +// is not supported on T1 instances. // -// * Redis configuration variables appendonly and appendfsync are not -// supported on Redis version 2.8.22 and later. +// * Redis configuration variables appendonly +// and appendfsync are not supported on Redis version 2.8.22 and later. type CacheNode struct { // The date and time when the cache node was created. @@ -1416,61 +1408,57 @@ type ReservedCacheNode struct { // cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. - // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The duration of the reservation in seconds. @@ -1536,61 +1524,57 @@ type ReservedCacheNodesOffering struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. 
Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. - // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. 
+ // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The duration of the offering. in seconds. @@ -1754,33 +1738,31 @@ type Snapshot struct { // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, // cache.t2.medium // - // * Previous generation: (not recommended. Existing clusters are - // still supported but creation of new clusters is not supported for these types.) - // T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, - // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - // cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: // - // * Previous generation: - // (not recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) C1 node types: cache.c1.xlarge + // * Previous generation: (not recommended) + // C1 node types: cache.c1.xlarge // - // * - // Memory optimized with data tiering: + // * Memory optimized with data tiering: // - // * Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge + // * Current + // generation: R6gd node types (available only for Redis engine version 6.2 + // onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, + // cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge // - // * Memory optimized: + // * Memory + // optimized: // - // * Current generation: R6g node types - // (available only for Redis engine version 5.0.6 onward and for Memcached engine - // version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For - // region availability, see Supported Node Types + // * Current generation: R6g node types (available only for Redis + // engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward). + // cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, + // cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge For region + // availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) // For region availability, see Supported Node Types // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) @@ -1789,28 +1771,26 @@ type Snapshot struct { // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended. 
- // Existing clusters are still supported but creation of new clusters is not - // supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - // cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // - // Additional node type - // info + // Additional node type info // - // * All current generation instance types are created in Amazon VPC by - // default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 - // instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not - // supported on Redis version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The cache parameter group that is associated with the source cluster. diff --git a/service/forecast/api_op_CreateAutoPredictor.go b/service/forecast/api_op_CreateAutoPredictor.go index 4f759968679..4ae37d01df3 100644 --- a/service/forecast/api_op_CreateAutoPredictor.go +++ b/service/forecast/api_op_CreateAutoPredictor.go @@ -27,21 +27,21 @@ import ( // granularity of your forecasts (hourly, daily, weekly, etc). // // * ForecastHorizon - -// The number of time steps being forecasted. +// The number of time-steps that the model predicts. The forecast horizon is also +// called the prediction length. // -// When creating a new predictor, do -// not specify a value for ReferencePredictorArn. Upgrading and retraining -// predictors The following parameters are required when retraining or upgrading a -// predictor: -// -// * PredictorName - A unique name for the predictor. +// When creating a new predictor, do not specify a +// value for ReferencePredictorArn. Upgrading and retraining predictors The +// following parameters are required when retraining or upgrading a predictor: // // * -// ReferencePredictorArn - The ARN of the predictor to retrain or upgrade. +// PredictorName - A unique name for the predictor. // -// When -// upgrading or retraining a predictor, only specify values for the -// ReferencePredictorArn and PredictorName. +// * ReferencePredictorArn - The +// ARN of the predictor to retrain or upgrade. +// +// When upgrading or retraining a +// predictor, only specify values for the ReferencePredictorArn and PredictorName. func (c *Client) CreateAutoPredictor(ctx context.Context, params *CreateAutoPredictorInput, optFns ...func(*Options)) (*CreateAutoPredictorOutput, error) { if params == nil { params = &CreateAutoPredictorInput{} @@ -91,7 +91,13 @@ type CreateAutoPredictorInput struct { ForecastFrequency *string // The number of time-steps that the model predicts. The forecast horizon is also - // called the prediction length. + // called the prediction length. The maximum forecast horizon is the lesser of 500 + // time-steps or 1/4 of the TARGET_TIME_SERIES dataset length. 
If you are + // retraining an existing AutoPredictor, then the maximum forecast horizon is the + // lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length. If you + // are upgrading to an AutoPredictor or retraining an existing AutoPredictor, you + // cannot update the forecast horizon parameter. You can meet this requirement by + // providing longer time-series in the dataset. ForecastHorizon *int32 // The forecast types used to train a predictor. You can specify up to five @@ -99,6 +105,13 @@ type CreateAutoPredictorInput struct { // of 0.01 or higher. You can also specify the mean forecast with mean. ForecastTypes []string + // The configuration details for predictor monitoring. Provide a name for the + // monitor resource to enable predictor monitoring. Predictor monitoring allows you + // to see how your predictor's performance changes over time. For more information, + // see Predictor Monitoring + // (https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring.html). + MonitorConfig *types.MonitorConfig + // The accuracy metric used to optimize the predictor. OptimizationMetric types.OptimizationMetric diff --git a/service/forecast/api_op_CreateDataset.go b/service/forecast/api_op_CreateDataset.go index dc02a85ecd0..1a61143ebde 100644 --- a/service/forecast/api_op_CreateDataset.go +++ b/service/forecast/api_op_CreateDataset.go @@ -30,12 +30,16 @@ import ( // After creating a // dataset, you import your training data into it and add the dataset to a dataset // group. You use the dataset group to create a predictor. For more information, -// see howitworks-datasets-groups. To get a list of all your datasets, use the -// ListDatasets operation. For example Forecast datasets, see the Amazon Forecast -// Sample GitHub repository -// (https://github.com/aws-samples/amazon-forecast-samples). The Status of a -// dataset must be ACTIVE before you can import training data. Use the -// DescribeDataset operation to get the status. +// see Importing datasets +// (https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html). +// To get a list of all your datasets, use the ListDatasets +// (https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasets.html) +// operation. For example Forecast datasets, see the Amazon Forecast Sample GitHub +// repository (https://github.com/aws-samples/amazon-forecast-samples). The Status +// of a dataset must be ACTIVE before you can import training data. Use the +// DescribeDataset +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html) +// operation to get the status. func (c *Client) CreateDataset(ctx context.Context, params *CreateDatasetInput, optFns ...func(*Options)) (*CreateDatasetOutput, error) { if params == nil { params = &CreateDatasetInput{} @@ -65,12 +69,14 @@ type CreateDatasetInput struct { // The domain associated with the dataset. When you add a dataset to a dataset // group, this value and the value specified for the Domain parameter of the - // CreateDatasetGroup operation must match. The Domain and DatasetType that you - // choose determine the fields that must be present in the training data that you - // import to the dataset. For example, if you choose the RETAIL domain and - // TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, - // timestamp, and demand fields to be present in your data. For more information, - // see howitworks-datasets-groups. 
+ // CreateDatasetGroup + // (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html) + // operation must match. The Domain and DatasetType that you choose determine the + // fields that must be present in the training data that you import to the dataset. + // For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the + // DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to + // be present in your data. For more information, see Importing datasets + // (https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html). // // This member is required. Domain types.Domain @@ -78,8 +84,9 @@ type CreateDatasetInput struct { // The schema for the dataset. The schema attributes and their order must match the // fields in your data. The dataset Domain and DatasetType that you choose // determine the minimum required fields in your training data. For information - // about the required fields for a specific dataset domain and type, see - // howitworks-domains-ds-types. + // about the required fields for a specific dataset domain and type, see Dataset + // Domains and Dataset Types + // (https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html). // // This member is required. Schema *types.Schema diff --git a/service/forecast/api_op_CreateDatasetGroup.go b/service/forecast/api_op_CreateDatasetGroup.go index 9ce27b81a99..918db543904 100644 --- a/service/forecast/api_op_CreateDatasetGroup.go +++ b/service/forecast/api_op_CreateDatasetGroup.go @@ -13,12 +13,19 @@ import ( // Creates a dataset group, which holds a collection of related datasets. You can // add datasets to the dataset group when you create the dataset group, or later by -// using the UpdateDatasetGroup operation. After creating a dataset group and -// adding datasets, you use the dataset group when you create a predictor. For more -// information, see howitworks-datasets-groups. To get a list of all your datasets -// groups, use the ListDatasetGroups operation. The Status of a dataset group must -// be ACTIVE before you can use the dataset group to create a predictor. To get the -// status, use the DescribeDatasetGroup operation. +// using the UpdateDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html) +// operation. After creating a dataset group and adding datasets, you use the +// dataset group when you create a predictor. For more information, see Dataset +// groups +// (https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html). +// To get a list of all your datasets groups, use the ListDatasetGroups +// (https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetGroups.html) +// operation. The Status of a dataset group must be ACTIVE before you can use the +// dataset group to create a predictor. To get the status, use the +// DescribeDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html) +// operation. func (c *Client) CreateDatasetGroup(ctx context.Context, params *CreateDatasetGroupInput, optFns ...func(*Options)) (*CreateDatasetGroupOutput, error) { if params == nil { params = &CreateDatasetGroupInput{} @@ -43,12 +50,14 @@ type CreateDatasetGroupInput struct { // The domain associated with the dataset group. When you add a dataset to a // dataset group, this value and the value specified for the Domain parameter of - // the CreateDataset operation must match. 
The Domain and DatasetType that you - // choose determine the fields that must be present in training data that you - // import to a dataset. For example, if you choose the RETAIL domain and - // TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires that item_id, - // timestamp, and demand fields are present in your data. For more information, see - // howitworks-datasets-groups. + // the CreateDataset + // (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html) + // operation must match. The Domain and DatasetType that you choose determine the + // fields that must be present in training data that you import to a dataset. For + // example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the + // DatasetType, Amazon Forecast requires that item_id, timestamp, and demand fields + // are present in your data. For more information, see Dataset groups + // (https://docs.aws.amazon.com/forecast/latest/dg/howitworks-datasets-groups.html). // // This member is required. Domain types.Domain diff --git a/service/forecast/api_op_CreateDatasetImportJob.go b/service/forecast/api_op_CreateDatasetImportJob.go index 85d49e2b773..a6afa44a3ed 100644 --- a/service/forecast/api_op_CreateDatasetImportJob.go +++ b/service/forecast/api_op_CreateDatasetImportJob.go @@ -14,19 +14,24 @@ import ( // Imports your training data to an Amazon Forecast dataset. You provide the // location of your training data in an Amazon Simple Storage Service (Amazon S3) // bucket and the Amazon Resource Name (ARN) of the dataset that you want to import -// the data to. You must specify a DataSource object that includes an AWS Identity -// and Access Management (IAM) role that Amazon Forecast can assume to access the -// data, as Amazon Forecast makes a copy of your data and processes it in an -// internal AWS system. For more information, see aws-forecast-iam-roles. The -// training data must be in CSV format. The delimiter must be a comma (,). You can -// specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 -// bucket. For the latter two cases, Amazon Forecast imports all files up to the -// limit of 10,000 files. Because dataset imports are not aggregated, your most +// the data to. You must specify a DataSource +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DataSource.html) object that +// includes an AWS Identity and Access Management (IAM) role that Amazon Forecast +// can assume to access the data, as Amazon Forecast makes a copy of your data and +// processes it in an internal AWS system. For more information, see Set up +// permissions +// (https://docs.aws.amazon.com/forecast/latest/dg/aws-forecast-iam-roles.html). +// The training data must be in CSV format. The delimiter must be a comma (,). You +// can specify the path to a specific CSV file, the S3 bucket, or to a folder in +// the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to +// the limit of 10,000 files. Because dataset imports are not aggregated, your most // recent dataset import is the one that is used when training a predictor or // generating a forecast. Make sure that your most recent dataset import contains // all of the data you want to model off of, and not just the new data collected // since the previous import. To get a list of all your dataset import jobs, -// filtered by specified criteria, use the ListDatasetImportJobs operation. 
+// filtered by specified criteria, use the ListDatasetImportJobs +// (https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetImportJobs.html) +// operation. func (c *Client) CreateDatasetImportJob(ctx context.Context, params *CreateDatasetImportJobInput, optFns ...func(*Options)) (*CreateDatasetImportJobOutput, error) { if params == nil { params = &CreateDatasetImportJobInput{} @@ -50,7 +55,9 @@ type CreateDatasetImportJobInput struct { // DataSource must include an AWS Key Management Service (KMS) key and the IAM role // must allow Amazon Forecast permission to access the key. The KMS key and IAM // role must match those specified in the EncryptionConfig parameter of the - // CreateDataset operation. + // CreateDataset + // (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html) + // operation. // // This member is required. DataSource *types.DataSource diff --git a/service/forecast/api_op_CreateExplainability.go b/service/forecast/api_op_CreateExplainability.go index 68aa78a27f0..d9b8dc70e41 100644 --- a/service/forecast/api_op_CreateExplainability.go +++ b/service/forecast/api_op_CreateExplainability.go @@ -124,7 +124,7 @@ type CreateExplainabilityInput struct { // Service (KMS) key. DataSource *types.DataSource - // Create an Expainability visualization that is viewable within the AWS console. + // Create an Explainability visualization that is viewable within the AWS console. EnableVisualization *bool // If TimePointGranularity is set to SPECIFIC, define the last time point for the diff --git a/service/forecast/api_op_CreateForecast.go b/service/forecast/api_op_CreateForecast.go index 3ecb47d6c8f..47dd6ef13c9 100644 --- a/service/forecast/api_op_CreateForecast.go +++ b/service/forecast/api_op_CreateForecast.go @@ -55,7 +55,9 @@ type CreateForecastInput struct { // specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 // (increments of .01 only) and mean. The mean forecast is different from the // median (0.50) when the distribution is not symmetric (for example, Beta and - // Negative Binomial). The default value is ["0.1", "0.5", "0.9"]. + // Negative Binomial). The default quantiles are the quantiles you specified during + // predictor creation. If you didn't specify quantiles, the default values are + // ["0.1", "0.5", "0.9"]. ForecastTypes []string // The optional metadata that you apply to the forecast to help you categorize and diff --git a/service/forecast/api_op_CreateMonitor.go b/service/forecast/api_op_CreateMonitor.go new file mode 100644 index 00000000000..ea6b1b221ce --- /dev/null +++ b/service/forecast/api_op_CreateMonitor.go @@ -0,0 +1,134 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package forecast + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/forecast/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a predictor monitor resource for an existing auto predictor. Predictor +// monitoring allows you to see how your predictor's performance changes over time. +// For more information, see Predictor Monitoring +// (https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring.html). 
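+//
+// As a minimal caller-side usage sketch only (hypothetical names and ARN; it
+// assumes aws.String/aws.ToString from the github.com/aws/aws-sdk-go-v2/aws
+// package, with error handling elided):
+//
+//	out, err := client.CreateMonitor(ctx, &forecast.CreateMonitorInput{
+//		MonitorName: aws.String("sample-monitor"),
+//		ResourceArn: aws.String("arn:aws:forecast:us-west-2:111122223333:predictor/sample-auto-predictor"),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(aws.ToString(out.MonitorArn))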
+func (c *Client) CreateMonitor(ctx context.Context, params *CreateMonitorInput, optFns ...func(*Options)) (*CreateMonitorOutput, error) { + if params == nil { + params = &CreateMonitorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateMonitor", params, optFns, c.addOperationCreateMonitorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateMonitorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateMonitorInput struct { + + // The name of the monitor resource. + // + // This member is required. + MonitorName *string + + // The Amazon Resource Name (ARN) of the predictor to monitor. + // + // This member is required. + ResourceArn *string + + // A list of tags + // (https://docs.aws.amazon.com/forecast/latest/dg/tagging-forecast-resources.html) + // to apply to the monitor resource. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateMonitorOutput struct { + + // The Amazon Resource Name (ARN) of the monitor resource. + MonitorArn *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateMonitorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateMonitor{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateMonitor{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateMonitorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMonitor(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateMonitor(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "forecast", + OperationName: "CreateMonitor", + } +} diff --git a/service/forecast/api_op_DeleteDataset.go b/service/forecast/api_op_DeleteDataset.go index b62bc116a77..816338ec1a1 100644 --- a/service/forecast/api_op_DeleteDataset.go +++ b/service/forecast/api_op_DeleteDataset.go @@ -11,11 +11,15 @@ 
import (
 )
 
 // Deletes an Amazon Forecast dataset that was created using the CreateDataset
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html)
 // operation. You can only delete datasets that have a status of ACTIVE or
-// CREATE_FAILED. To get the status use the DescribeDataset operation. Forecast
-// does not automatically update any dataset groups that contain the deleted
-// dataset. In order to update the dataset group, use the operation, omitting the
-// deleted dataset's ARN.
+// CREATE_FAILED. To get the status, use the DescribeDataset
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html)
+// operation. Forecast does not automatically update any dataset groups that
+// contain the deleted dataset. In order to update the dataset group, use the
+// UpdateDatasetGroup
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html)
+// operation, omitting the deleted dataset's ARN.
 func (c *Client) DeleteDataset(ctx context.Context, params *DeleteDatasetInput, optFns ...func(*Options)) (*DeleteDatasetOutput, error) {
 	if params == nil {
 		params = &DeleteDatasetInput{}
diff --git a/service/forecast/api_op_DeleteDatasetGroup.go b/service/forecast/api_op_DeleteDatasetGroup.go
index 7c6d386c7e3..4189c963d7b 100644
--- a/service/forecast/api_op_DeleteDatasetGroup.go
+++ b/service/forecast/api_op_DeleteDatasetGroup.go
@@ -10,10 +10,13 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Deletes a dataset group created using the CreateDatasetGroup operation. You can
-// only delete dataset groups that have a status of ACTIVE, CREATE_FAILED, or
-// UPDATE_FAILED. To get the status, use the DescribeDatasetGroup operation. This
-// operation deletes only the dataset group, not the datasets in the group.
+// Deletes a dataset group created using the CreateDatasetGroup
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html)
+// operation. You can only delete dataset groups that have a status of ACTIVE,
+// CREATE_FAILED, or UPDATE_FAILED. To get the status, use the DescribeDatasetGroup
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html)
+// operation. This operation deletes only the dataset group, not the datasets in
+// the group.
 func (c *Client) DeleteDatasetGroup(ctx context.Context, params *DeleteDatasetGroupInput, optFns ...func(*Options)) (*DeleteDatasetGroupOutput, error) {
 	if params == nil {
 		params = &DeleteDatasetGroupInput{}
diff --git a/service/forecast/api_op_DeleteDatasetImportJob.go b/service/forecast/api_op_DeleteDatasetImportJob.go
index b1d61716d85..6cbc83f5cff 100644
--- a/service/forecast/api_op_DeleteDatasetImportJob.go
+++ b/service/forecast/api_op_DeleteDatasetImportJob.go
@@ -10,9 +10,12 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Deletes a dataset import job created using the CreateDatasetImportJob operation.
-// You can delete only dataset import jobs that have a status of ACTIVE or
-// CREATE_FAILED. To get the status, use the DescribeDatasetImportJob operation.
+// Deletes a dataset import job created using the CreateDatasetImportJob
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html)
+// operation. You can delete only dataset import jobs that have a status of ACTIVE
+// or CREATE_FAILED. To get the status, use the DescribeDatasetImportJob
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetImportJob.html)
+// operation.
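+//
+// A minimal usage sketch (the DatasetImportJobArn field name and the ARN value
+// are illustrative assumptions, not taken from this file; error handling
+// elided):
+//
+//	_, err := client.DeleteDatasetImportJob(ctx, &forecast.DeleteDatasetImportJobInput{
+//		DatasetImportJobArn: aws.String("arn:aws:forecast:us-west-2:111122223333:dataset-import-job/sample/import1"),
+//	})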
func (c *Client) DeleteDatasetImportJob(ctx context.Context, params *DeleteDatasetImportJobInput, optFns ...func(*Options)) (*DeleteDatasetImportJobOutput, error) { if params == nil { params = &DeleteDatasetImportJobInput{} diff --git a/service/forecast/api_op_DeleteMonitor.go b/service/forecast/api_op_DeleteMonitor.go new file mode 100644 index 00000000000..c8500085a40 --- /dev/null +++ b/service/forecast/api_op_DeleteMonitor.go @@ -0,0 +1,117 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package forecast + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a monitor resource. You can only delete a monitor resource with a status +// of ACTIVE, ACTIVE_STOPPED, CREATE_FAILED, or CREATE_STOPPED. +func (c *Client) DeleteMonitor(ctx context.Context, params *DeleteMonitorInput, optFns ...func(*Options)) (*DeleteMonitorOutput, error) { + if params == nil { + params = &DeleteMonitorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteMonitor", params, optFns, c.addOperationDeleteMonitorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteMonitorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteMonitorInput struct { + + // The Amazon Resource Name (ARN) of the monitor resource to delete. + // + // This member is required. + MonitorArn *string + + noSmithyDocumentSerde +} + +type DeleteMonitorOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteMonitorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteMonitor{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteMonitor{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteMonitorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteMonitor(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err 
!= nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteMonitor(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "forecast",
+		OperationName: "DeleteMonitor",
+	}
+}
diff --git a/service/forecast/api_op_DescribeAutoPredictor.go b/service/forecast/api_op_DescribeAutoPredictor.go
index 6d608ae04d4..837bab26d72 100644
--- a/service/forecast/api_op_DescribeAutoPredictor.go
+++ b/service/forecast/api_op_DescribeAutoPredictor.go
@@ -100,6 +100,9 @@ type DescribeAutoPredictorOutput struct {
 	// In the event of an error, a message detailing the cause of the error.
 	Message *string
 
+	// An object with the Amazon Resource Name (ARN) and status of the monitor resource.
+	MonitorInfo *types.MonitorInfo
+
 	// The accuracy metric used to optimize the predictor.
 	OptimizationMetric types.OptimizationMetric
 
diff --git a/service/forecast/api_op_DescribeDataset.go b/service/forecast/api_op_DescribeDataset.go
index 15f179b4263..d819caa54c4 100644
--- a/service/forecast/api_op_DescribeDataset.go
+++ b/service/forecast/api_op_DescribeDataset.go
@@ -12,14 +12,15 @@ import (
 	"time"
 )
 
-// Describes an Amazon Forecast dataset created using the CreateDataset operation.
-// In addition to listing the parameters specified in the CreateDataset request,
-// this operation includes the following dataset properties:
-//
-// * CreationTime
+// Describes an Amazon Forecast dataset created using the CreateDataset
+// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html)
+// operation. In addition to listing the parameters specified in the CreateDataset
+// request, this operation includes the following dataset properties:
 //
 // *
-// LastModificationTime
+// CreationTime
+
+// * LastModificationTime
 //
 // * Status
 func (c *Client) DescribeDataset(ctx context.Context, params *DescribeDatasetInput, optFns ...func(*Options)) (*DescribeDatasetOutput, error) {
@@ -76,8 +77,10 @@ type DescribeDatasetOutput struct {
 
 	// When you create a dataset, LastModificationTime is the same as CreationTime.
 	// While data is being imported to the dataset, LastModificationTime is the current
-	// time of the DescribeDataset call. After a CreateDatasetImportJob operation has
-	// finished, LastModificationTime is when the import job completed or failed.
+	// time of the DescribeDataset call. After a CreateDatasetImportJob
+	// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html)
+	// operation has finished, LastModificationTime is when the import job completed or
+	// failed.
 	LastModificationTime *time.Time
 
 	// An array of SchemaAttribute objects that specify the dataset fields. Each
@@ -98,10 +101,12 @@ type DescribeDatasetOutput struct {
 
 	// The UPDATE
 	// states apply while data is imported to the dataset from a call to the
-	// CreateDatasetImportJob operation and reflect the status of the dataset import
-	// job. For example, when the import job status is CREATE_IN_PROGRESS, the status
-	// of the dataset is UPDATE_IN_PROGRESS. The Status of the dataset must be ACTIVE
-	// before you can import training data.
+	// CreateDatasetImportJob
+	// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html)
+	// operation and reflect the status of the dataset import job. For example, when
+	// the import job status is CREATE_IN_PROGRESS, the status of the dataset is
+	// UPDATE_IN_PROGRESS.
The Status of the dataset must be ACTIVE before you can + // import training data. Status *string // Metadata pertaining to the operation's result. diff --git a/service/forecast/api_op_DescribeDatasetGroup.go b/service/forecast/api_op_DescribeDatasetGroup.go index 199136d0725..9642726170b 100644 --- a/service/forecast/api_op_DescribeDatasetGroup.go +++ b/service/forecast/api_op_DescribeDatasetGroup.go @@ -12,16 +12,18 @@ import ( "time" ) -// Describes a dataset group created using the CreateDatasetGroup operation. In -// addition to listing the parameters provided in the CreateDatasetGroup request, -// this operation includes the following properties: +// Describes a dataset group created using the CreateDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html) +// operation. In addition to listing the parameters provided in the +// CreateDatasetGroup request, this operation includes the following properties: // -// * DatasetArns - The datasets -// belonging to the group. +// * +// DatasetArns - The datasets belonging to the group. // // * CreationTime // -// * LastModificationTime +// * +// LastModificationTime // // * Status func (c *Client) DescribeDatasetGroup(ctx context.Context, params *DescribeDatasetGroupInput, optFns ...func(*Options)) (*DescribeDatasetGroupOutput, error) { @@ -68,8 +70,10 @@ type DescribeDatasetGroupOutput struct { Domain types.Domain // When the dataset group was created or last updated from a call to the - // UpdateDatasetGroup operation. While the dataset group is being updated, - // LastModificationTime is the current time of the DescribeDatasetGroup call. + // UpdateDatasetGroup + // (https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html) + // operation. While the dataset group is being updated, LastModificationTime is the + // current time of the DescribeDatasetGroup call. LastModificationTime *time.Time // The status of the dataset group. States include: @@ -85,9 +89,10 @@ type DescribeDatasetGroupOutput struct { // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED // // The UPDATE - // states apply when you call the UpdateDatasetGroup operation. The Status of the - // dataset group must be ACTIVE before you can use the dataset group to create a - // predictor. + // states apply when you call the UpdateDatasetGroup + // (https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html) + // operation. The Status of the dataset group must be ACTIVE before you can use the + // dataset group to create a predictor. Status *string // Metadata pertaining to the operation's result. diff --git a/service/forecast/api_op_DescribeDatasetImportJob.go b/service/forecast/api_op_DescribeDatasetImportJob.go index 40cd4fd5a42..edc95982491 100644 --- a/service/forecast/api_op_DescribeDatasetImportJob.go +++ b/service/forecast/api_op_DescribeDatasetImportJob.go @@ -13,6 +13,7 @@ import ( ) // Describes a dataset import job created using the CreateDatasetImportJob +// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html) // operation. In addition to listing the parameters provided in the // CreateDatasetImportJob request, this operation includes the following // properties: diff --git a/service/forecast/api_op_DescribeMonitor.go b/service/forecast/api_op_DescribeMonitor.go new file mode 100644 index 00000000000..c7c30276191 --- /dev/null +++ b/service/forecast/api_op_DescribeMonitor.go @@ -0,0 +1,171 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+package forecast
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"time"
+)
+
+// Describes a monitor resource. In addition to listing the properties provided in
+// the CreateMonitor request, this operation lists the following properties:
+//
+// *
+// Baseline
+//
+// * CreationTime
+//
+// * LastEvaluationTime
+//
+// * LastEvaluationState
+//
+// *
+// LastModificationTime
+//
+// * Message
+//
+// * Status
+func (c *Client) DescribeMonitor(ctx context.Context, params *DescribeMonitorInput, optFns ...func(*Options)) (*DescribeMonitorOutput, error) {
+	if params == nil {
+		params = &DescribeMonitorInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "DescribeMonitor", params, optFns, c.addOperationDescribeMonitorMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*DescribeMonitorOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type DescribeMonitorInput struct {
+
+	// The Amazon Resource Name (ARN) of the monitor resource to describe.
+	//
+	// This member is required.
+	MonitorArn *string
+
+	noSmithyDocumentSerde
+}
+
+type DescribeMonitorOutput struct {
+
+	// Metrics you can use as a baseline for comparison purposes. Use these values when
+	// you interpret monitoring results for an auto predictor.
+	Baseline *types.Baseline
+
+	// The timestamp for when the monitor resource was created.
+	CreationTime *time.Time
+
+	// The estimated number of minutes remaining before the monitor resource finishes
+	// its current evaluation.
+	EstimatedEvaluationTimeRemainingInMinutes *int64
+
+	// The state of the monitor's latest evaluation.
+	LastEvaluationState *string
+
+	// The timestamp of the latest evaluation completed by the monitor.
+	LastEvaluationTime *time.Time
+
+	// The timestamp of the latest modification to the monitor.
+	LastModificationTime *time.Time
+
+	// An error message, if any, for the monitor.
+	Message *string
+
+	// The Amazon Resource Name (ARN) of the monitor resource described.
+	MonitorArn *string
+
+	// The name of the monitor.
+	MonitorName *string
+
+	// The Amazon Resource Name (ARN) of the auto predictor being monitored.
+	ResourceArn *string
+
+	// The status of the monitor resource.
+	Status *string
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeMonitorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeMonitor{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeMonitor{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeMonitorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeMonitor(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeMonitor(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "forecast", + OperationName: "DescribeMonitor", + } +} diff --git a/service/forecast/api_op_ListDatasetGroups.go b/service/forecast/api_op_ListDatasetGroups.go index 36995358749..208b3b2aeb9 100644 --- a/service/forecast/api_op_ListDatasetGroups.go +++ b/service/forecast/api_op_ListDatasetGroups.go @@ -12,10 +12,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of dataset groups created using the CreateDatasetGroup operation. -// For each dataset group, this operation returns a summary of its properties, -// including its Amazon Resource Name (ARN). You can retrieve the complete set of -// properties by using the dataset group ARN with the DescribeDatasetGroup +// Returns a list of dataset groups created using the CreateDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetGroup.html) +// operation. For each dataset group, this operation returns a summary of its +// properties, including its Amazon Resource Name (ARN). You can retrieve the +// complete set of properties by using the dataset group ARN with the +// DescribeDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html) // operation. 
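+//
+// A minimal usage sketch (the summary field names on the output are
+// illustrative assumptions, not taken from this file; error handling elided):
+//
+//	out, err := client.ListDatasetGroups(ctx, &forecast.ListDatasetGroupsInput{})
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, g := range out.DatasetGroups {
+//		fmt.Println(aws.ToString(g.DatasetGroupArn))
+//	}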
func (c *Client) ListDatasetGroups(ctx context.Context, params *ListDatasetGroupsInput, optFns ...func(*Options)) (*ListDatasetGroupsOutput, error) { if params == nil { diff --git a/service/forecast/api_op_ListDatasetImportJobs.go b/service/forecast/api_op_ListDatasetImportJobs.go index d4d95d93f11..d3d4627e3d6 100644 --- a/service/forecast/api_op_ListDatasetImportJobs.go +++ b/service/forecast/api_op_ListDatasetImportJobs.go @@ -13,10 +13,13 @@ import ( ) // Returns a list of dataset import jobs created using the CreateDatasetImportJob +// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html) // operation. For each import job, this operation returns a summary of its // properties, including its Amazon Resource Name (ARN). You can retrieve the // complete set of properties by using the ARN with the DescribeDatasetImportJob -// operation. You can filter the list by providing an array of Filter objects. +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetImportJob.html) +// operation. You can filter the list by providing an array of Filter +// (https://docs.aws.amazon.com/forecast/latest/dg/API_Filter.html) objects. func (c *Client) ListDatasetImportJobs(ctx context.Context, params *ListDatasetImportJobsInput, optFns ...func(*Options)) (*ListDatasetImportJobsOutput, error) { if params == nil { params = &ListDatasetImportJobsInput{} diff --git a/service/forecast/api_op_ListDatasets.go b/service/forecast/api_op_ListDatasets.go index 0b320f7c8c5..c218040f23d 100644 --- a/service/forecast/api_op_ListDatasets.go +++ b/service/forecast/api_op_ListDatasets.go @@ -12,10 +12,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of datasets created using the CreateDataset operation. For each -// dataset, a summary of its properties, including its Amazon Resource Name (ARN), -// is returned. To retrieve the complete set of properties, use the ARN with the -// DescribeDataset operation. +// Returns a list of datasets created using the CreateDataset +// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDataset.html) +// operation. For each dataset, a summary of its properties, including its Amazon +// Resource Name (ARN), is returned. To retrieve the complete set of properties, +// use the ARN with the DescribeDataset +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html) +// operation. func (c *Client) ListDatasets(ctx context.Context, params *ListDatasetsInput, optFns ...func(*Options)) (*ListDatasetsOutput, error) { if params == nil { params = &ListDatasetsInput{} diff --git a/service/forecast/api_op_ListExplainabilities.go b/service/forecast/api_op_ListExplainabilities.go index 2e75a4c5057..8dfd3135d15 100644 --- a/service/forecast/api_op_ListExplainabilities.go +++ b/service/forecast/api_op_ListExplainabilities.go @@ -4,6 +4,7 @@ package forecast import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/forecast/types" @@ -138,6 +139,97 @@ func (c *Client) addOperationListExplainabilitiesMiddlewares(stack *middleware.S return nil } +// ListExplainabilitiesAPIClient is a client that implements the +// ListExplainabilities operation. 
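+//
+// The interface exists so the paginator below can be driven by any
+// implementation, for example a mock in tests. A minimal paginator sketch
+// (caller-side, error handling elided; the loop drains every page):
+//
+//	p := forecast.NewListExplainabilitiesPaginator(client, &forecast.ListExplainabilitiesInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			// handle error
+//			break
+//		}
+//		_ = page // inspect page contents here
+//	}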
+type ListExplainabilitiesAPIClient interface {
+	ListExplainabilities(context.Context, *ListExplainabilitiesInput, ...func(*Options)) (*ListExplainabilitiesOutput, error)
+}
+
+var _ ListExplainabilitiesAPIClient = (*Client)(nil)
+
+// ListExplainabilitiesPaginatorOptions is the paginator options for
+// ListExplainabilities
+type ListExplainabilitiesPaginatorOptions struct {
+	// The number of items returned in the response.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListExplainabilitiesPaginator is a paginator for ListExplainabilities
+type ListExplainabilitiesPaginator struct {
+	options   ListExplainabilitiesPaginatorOptions
+	client    ListExplainabilitiesAPIClient
+	params    *ListExplainabilitiesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListExplainabilitiesPaginator returns a new ListExplainabilitiesPaginator
+func NewListExplainabilitiesPaginator(client ListExplainabilitiesAPIClient, params *ListExplainabilitiesInput, optFns ...func(*ListExplainabilitiesPaginatorOptions)) *ListExplainabilitiesPaginator {
+	if params == nil {
+		params = &ListExplainabilitiesInput{}
+	}
+
+	options := ListExplainabilitiesPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListExplainabilitiesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListExplainabilitiesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListExplainabilities page.
+func (p *ListExplainabilitiesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListExplainabilitiesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.ListExplainabilities(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
 func newServiceMetadataMiddleware_opListExplainabilities(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region: region,
diff --git a/service/forecast/api_op_ListExplainabilityExports.go b/service/forecast/api_op_ListExplainabilityExports.go
index 2deb08a4fa9..d452f89ae10 100644
--- a/service/forecast/api_op_ListExplainabilityExports.go
+++ b/service/forecast/api_op_ListExplainabilityExports.go
@@ -4,6 +4,7 @@ package forecast
 
 import (
 	"context"
+	"fmt"
 	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
@@ -137,6 +138,98 @@ func (c *Client) addOperationListExplainabilityExportsMiddlewares(stack *middlew
 	return nil
 }
 
+// ListExplainabilityExportsAPIClient is a client that implements the
+// ListExplainabilityExports operation.
+type ListExplainabilityExportsAPIClient interface {
+	ListExplainabilityExports(context.Context, *ListExplainabilityExportsInput, ...func(*Options)) (*ListExplainabilityExportsOutput, error)
+}
+
+var _ ListExplainabilityExportsAPIClient = (*Client)(nil)
+
+// ListExplainabilityExportsPaginatorOptions is the paginator options for
+// ListExplainabilityExports
+type ListExplainabilityExportsPaginatorOptions struct {
+	// The number of items to return in the response.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListExplainabilityExportsPaginator is a paginator for ListExplainabilityExports
+type ListExplainabilityExportsPaginator struct {
+	options   ListExplainabilityExportsPaginatorOptions
+	client    ListExplainabilityExportsAPIClient
+	params    *ListExplainabilityExportsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListExplainabilityExportsPaginator returns a new
+// ListExplainabilityExportsPaginator
+func NewListExplainabilityExportsPaginator(client ListExplainabilityExportsAPIClient, params *ListExplainabilityExportsInput, optFns ...func(*ListExplainabilityExportsPaginatorOptions)) *ListExplainabilityExportsPaginator {
+	if params == nil {
+		params = &ListExplainabilityExportsInput{}
+	}
+
+	options := ListExplainabilityExportsPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListExplainabilityExportsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListExplainabilityExportsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListExplainabilityExports page.
+func (p *ListExplainabilityExportsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListExplainabilityExportsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.ListExplainabilityExports(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
 func newServiceMetadataMiddleware_opListExplainabilityExports(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region: region,
diff --git a/service/forecast/api_op_ListMonitorEvaluations.go b/service/forecast/api_op_ListMonitorEvaluations.go
new file mode 100644
index 00000000000..749abfb89ca
--- /dev/null
+++ b/service/forecast/api_op_ListMonitorEvaluations.go
@@ -0,0 +1,257 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package forecast
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of the monitoring evaluation results and predictor events
+// collected by the monitor resource during different windows of time. For more
+// information about monitoring and retrieving monitoring results, see Viewing
+// Monitoring Results
+// (https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring-results.html).
+func (c *Client) ListMonitorEvaluations(ctx context.Context, params *ListMonitorEvaluationsInput, optFns ...func(*Options)) (*ListMonitorEvaluationsOutput, error) {
+	if params == nil {
+		params = &ListMonitorEvaluationsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListMonitorEvaluations", params, optFns, c.addOperationListMonitorEvaluationsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListMonitorEvaluationsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListMonitorEvaluationsInput struct {
+
+	// The Amazon Resource Name (ARN) of the monitor resource to get results from.
+	//
+	// This member is required.
+	MonitorArn *string
+
+	// An array of filters. For each filter, provide a condition and a match statement.
+	// The condition is either IS or IS_NOT, which specifies whether to include or
+	// exclude the resources that match the statement from the list. The match
+	// statement consists of a key and a value. Filter properties
+	//
+	// * Condition - The
+	// condition to apply. Valid values are IS and IS_NOT.
+	//
+	// * Key - The name of the
+	// parameter to filter on. The only valid value is EvaluationState.
+	//
+	// * Value - The
+	// value to match. Valid values are only SUCCESS or FAILURE.
+	//
+	// For example, to list
+	// only successful monitor evaluations, you would specify: "Filters": [ {
+	// "Condition": "IS", "Key": "EvaluationState", "Value": "SUCCESS" } ]
+	Filters []types.Filter
+
+	// The maximum number of monitoring results to return.
+	MaxResults *int32
+
+	// If the result of the previous request was truncated, the response includes a
+	// NextToken. To retrieve the next set of results, use the token in the next
+	// request. Tokens expire after 24 hours.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+type ListMonitorEvaluationsOutput struct {
+
+	// If the response is truncated, Amazon Forecast returns this token. To retrieve
+	// the next set of results, use the token in the next request. Tokens expire after
+	// 24 hours.
+	NextToken *string
+
+	// The monitoring results and predictor events collected by the monitor resource
+	// during different windows of time. For more information about monitoring and
+	// retrieving monitoring results, see Viewing Monitoring Results
+	// (https://docs.aws.amazon.com/forecast/latest/dg/predictor-monitoring-results.html).
+	PredictorMonitorEvaluations []types.PredictorMonitorEvaluation
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListMonitorEvaluationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListMonitorEvaluations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListMonitorEvaluations{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListMonitorEvaluationsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMonitorEvaluations(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListMonitorEvaluationsAPIClient is a client that implements the +// ListMonitorEvaluations operation. +type ListMonitorEvaluationsAPIClient interface { + ListMonitorEvaluations(context.Context, *ListMonitorEvaluationsInput, ...func(*Options)) (*ListMonitorEvaluationsOutput, error) +} + +var _ ListMonitorEvaluationsAPIClient = (*Client)(nil) + +// ListMonitorEvaluationsPaginatorOptions is the paginator options for +// ListMonitorEvaluations +type ListMonitorEvaluationsPaginatorOptions struct { + // The maximum number of monitoring results to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// ListMonitorEvaluationsPaginator is a paginator for ListMonitorEvaluations
+type ListMonitorEvaluationsPaginator struct {
+	options   ListMonitorEvaluationsPaginatorOptions
+	client    ListMonitorEvaluationsAPIClient
+	params    *ListMonitorEvaluationsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListMonitorEvaluationsPaginator returns a new ListMonitorEvaluationsPaginator
+func NewListMonitorEvaluationsPaginator(client ListMonitorEvaluationsAPIClient, params *ListMonitorEvaluationsInput, optFns ...func(*ListMonitorEvaluationsPaginatorOptions)) *ListMonitorEvaluationsPaginator {
+	if params == nil {
+		params = &ListMonitorEvaluationsInput{}
+	}
+
+	options := ListMonitorEvaluationsPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListMonitorEvaluationsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListMonitorEvaluationsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListMonitorEvaluations page.
+func (p *ListMonitorEvaluationsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMonitorEvaluationsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.ListMonitorEvaluations(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opListMonitorEvaluations(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "forecast",
+		OperationName: "ListMonitorEvaluations",
+	}
+}
diff --git a/service/forecast/api_op_ListMonitors.go b/service/forecast/api_op_ListMonitors.go
new file mode 100644
index 00000000000..30f067082d3
--- /dev/null
+++ b/service/forecast/api_op_ListMonitors.go
@@ -0,0 +1,241 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package forecast
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/forecast/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of monitors created with the CreateMonitor operation and
+// CreateAutoPredictor operation. For each monitor resource, this operation returns
+// a summary of its properties, including its Amazon Resource Name (ARN). You can
+// retrieve a complete set of properties of a monitor resource by specifying the
+// monitor's ARN in the DescribeMonitor operation.
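+//
+// A usage sketch of the Status filter described on ListMonitorsInput below
+// (the types.Filter field shapes and the FilterConditionStringIs enum value
+// are assumptions about this package's generated types; error handling
+// elided):
+//
+//	out, err := client.ListMonitors(ctx, &forecast.ListMonitorsInput{
+//		Filters: []types.Filter{{
+//			Condition: types.FilterConditionStringIs,
+//			Key:       aws.String("Status"),
+//			Value:     aws.String("ACTIVE"),
+//		}},
+//	})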
+func (c *Client) ListMonitors(ctx context.Context, params *ListMonitorsInput, optFns ...func(*Options)) (*ListMonitorsOutput, error) {
+	if params == nil {
+		params = &ListMonitorsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListMonitors", params, optFns, c.addOperationListMonitorsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListMonitorsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListMonitorsInput struct {
+
+	// An array of filters. For each filter, provide a condition and a match statement.
+	// The condition is either IS or IS_NOT, which specifies whether to include or
+	// exclude the resources that match the statement from the list. The match
+	// statement consists of a key and a value. Filter properties
+	//
+	// * Condition - The
+	// condition to apply. Valid values are IS and IS_NOT.
+	//
+	// * Key - The name of the
+	// parameter to filter on. The only valid value is Status.
+	//
+	// * Value - The value to
+	// match.
+	//
+	// For example, to list all monitors whose status is ACTIVE, you would
+	// specify: "Filters": [ { "Condition": "IS", "Key": "Status", "Value": "ACTIVE" }
+	// ]
+	Filters []types.Filter
+
+	// The maximum number of monitors to include in the response.
+	MaxResults *int32
+
+	// If the result of the previous request was truncated, the response includes a
+	// NextToken. To retrieve the next set of results, use the token in the next
+	// request. Tokens expire after 24 hours.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+type ListMonitorsOutput struct {
+
+	// An array of objects that summarize each monitor's properties.
+	Monitors []types.MonitorSummary
+
+	// If the response is truncated, Amazon Forecast returns this token. To retrieve
+	// the next set of results, use the token in the next request.
+	NextToken *string
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListMonitorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListMonitors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListMonitors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListMonitorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMonitors(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListMonitorsAPIClient is a client that implements the ListMonitors operation. +type ListMonitorsAPIClient interface { + ListMonitors(context.Context, *ListMonitorsInput, ...func(*Options)) (*ListMonitorsOutput, error) +} + +var _ ListMonitorsAPIClient = (*Client)(nil) + +// ListMonitorsPaginatorOptions is the paginator options for ListMonitors +type ListMonitorsPaginatorOptions struct { + // The maximum number of monitors to include in the response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// ListMonitorsPaginator is a paginator for ListMonitors
+type ListMonitorsPaginator struct {
+	options   ListMonitorsPaginatorOptions
+	client    ListMonitorsAPIClient
+	params    *ListMonitorsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListMonitorsPaginator returns a new ListMonitorsPaginator
+func NewListMonitorsPaginator(client ListMonitorsAPIClient, params *ListMonitorsInput, optFns ...func(*ListMonitorsPaginatorOptions)) *ListMonitorsPaginator {
+	if params == nil {
+		params = &ListMonitorsInput{}
+	}
+
+	options := ListMonitorsPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListMonitorsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListMonitorsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListMonitors page.
+func (p *ListMonitorsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMonitorsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.ListMonitors(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opListMonitors(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "forecast",
+		OperationName: "ListMonitors",
+	}
+}
diff --git a/service/forecast/api_op_ResumeResource.go b/service/forecast/api_op_ResumeResource.go
new file mode 100644
index 00000000000..ba357a9f219
--- /dev/null
+++ b/service/forecast/api_op_ResumeResource.go
@@ -0,0 +1,116 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package forecast
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Resumes a stopped monitor resource.
+func (c *Client) ResumeResource(ctx context.Context, params *ResumeResourceInput, optFns ...func(*Options)) (*ResumeResourceOutput, error) {
+	if params == nil {
+		params = &ResumeResourceInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ResumeResource", params, optFns, c.addOperationResumeResourceMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ResumeResourceOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ResumeResourceInput struct {
+
+	// The Amazon Resource Name (ARN) of the monitor resource to resume.
+	//
+	// This member is required.
+	ResourceArn *string
+
+	noSmithyDocumentSerde
+}
+
+type ResumeResourceOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationResumeResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpResumeResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpResumeResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpResumeResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opResumeResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opResumeResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "forecast", + OperationName: "ResumeResource", + } +} diff --git a/service/forecast/api_op_UpdateDatasetGroup.go b/service/forecast/api_op_UpdateDatasetGroup.go index 05ef6f25e7e..5244084e012 100644 --- a/service/forecast/api_op_UpdateDatasetGroup.go +++ b/service/forecast/api_op_UpdateDatasetGroup.go @@ -12,7 +12,9 @@ import ( // Replaces the datasets in a dataset group with the specified datasets. The Status // of the dataset group must be ACTIVE before you can use the dataset group to -// create a predictor. Use the DescribeDatasetGroup operation to get the status. +// create a predictor. Use the DescribeDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html) +// operation to get the status. 
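+//
+// A minimal usage sketch (hypothetical ARNs; the input field shapes are
+// assumptions, as UpdateDatasetGroupInput is not shown in this patch). Note
+// that the datasets listed here replace the group's previous contents, so
+// include every dataset that should remain in the group:
+//
+//	_, err := client.UpdateDatasetGroup(ctx, &forecast.UpdateDatasetGroupInput{
+//		DatasetGroupArn: aws.String("arn:aws:forecast:us-west-2:111122223333:dataset-group/sample"),
+//		DatasetArns:     []string{"arn:aws:forecast:us-west-2:111122223333:dataset/sample_target"},
+//	})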
func (c *Client) UpdateDatasetGroup(ctx context.Context, params *UpdateDatasetGroupInput, optFns ...func(*Options)) (*UpdateDatasetGroupOutput, error) { if params == nil { params = &UpdateDatasetGroupInput{} diff --git a/service/forecast/deserializers.go b/service/forecast/deserializers.go index d7917739a36..d36480dd850 100644 --- a/service/forecast/deserializers.go +++ b/service/forecast/deserializers.go @@ -999,6 +999,129 @@ func awsAwsjson11_deserializeOpErrorCreateForecastExportJob(response *smithyhttp } } +type awsAwsjson11_deserializeOpCreateMonitor struct { +} + +func (*awsAwsjson11_deserializeOpCreateMonitor) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateMonitor) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateMonitor(response, &metadata) + } + output := &CreateMonitorOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateMonitorOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateMonitor(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidInputException", errorCode): + 
return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorResourceAlreadyExistsException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson11_deserializeOpCreatePredictor struct { } @@ -1910,6 +2033,101 @@ func awsAwsjson11_deserializeOpErrorDeleteForecastExportJob(response *smithyhttp } } +type awsAwsjson11_deserializeOpDeleteMonitor struct { +} + +func (*awsAwsjson11_deserializeOpDeleteMonitor) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteMonitor) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteMonitor(response, &metadata) + } + output := &DeleteMonitorOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteMonitor(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidInputException", errorCode): + return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return 
awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson11_deserializeOpDeletePredictor struct { } @@ -3107,14 +3325,14 @@ func awsAwsjson11_deserializeOpErrorDescribeForecastExportJob(response *smithyht } } -type awsAwsjson11_deserializeOpDescribePredictor struct { +type awsAwsjson11_deserializeOpDescribeMonitor struct { } -func (*awsAwsjson11_deserializeOpDescribePredictor) ID() string { +func (*awsAwsjson11_deserializeOpDescribeMonitor) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribePredictor) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeMonitor) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3128,9 +3346,9 @@ func (m *awsAwsjson11_deserializeOpDescribePredictor) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribePredictor(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeMonitor(response, &metadata) } - output := &DescribePredictorOutput{} + output := &DescribeMonitorOutput{} out.Result = output var buff [1024]byte @@ -3150,7 +3368,7 @@ func (m *awsAwsjson11_deserializeOpDescribePredictor) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribePredictorOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeMonitorOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3164,7 +3382,7 @@ func (m *awsAwsjson11_deserializeOpDescribePredictor) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribePredictor(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeMonitor(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3221,14 +3439,14 @@ func awsAwsjson11_deserializeOpErrorDescribePredictor(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob struct { +type awsAwsjson11_deserializeOpDescribePredictor struct { } -func (*awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) ID() string { +func (*awsAwsjson11_deserializeOpDescribePredictor) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribePredictor) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -3242,9 +3460,9 @@ func (m *awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribePredictorBacktestExportJob(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribePredictor(response, &metadata) } - output := &DescribePredictorBacktestExportJobOutput{} + output := &DescribePredictorOutput{} out.Result = output var buff [1024]byte @@ -3264,7 +3482,7 @@ func (m *awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) HandleDes return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribePredictorBacktestExportJobOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribePredictorOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3278,7 +3496,7 @@ func (m *awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) HandleDes return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribePredictorBacktestExportJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribePredictor(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3335,14 +3553,14 @@ func awsAwsjson11_deserializeOpErrorDescribePredictorBacktestExportJob(response } } -type awsAwsjson11_deserializeOpGetAccuracyMetrics struct { +type awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob struct { } -func (*awsAwsjson11_deserializeOpGetAccuracyMetrics) ID() string { +func (*awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpGetAccuracyMetrics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribePredictorBacktestExportJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3356,9 +3574,9 @@ func (m *awsAwsjson11_deserializeOpGetAccuracyMetrics) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorGetAccuracyMetrics(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribePredictorBacktestExportJob(response, &metadata) } - output := &GetAccuracyMetricsOutput{} + output := &DescribePredictorBacktestExportJobOutput{} out.Result = output var buff [1024]byte @@ -3378,7 +3596,7 @@ func (m *awsAwsjson11_deserializeOpGetAccuracyMetrics) HandleDeserialize(ctx con return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentGetAccuracyMetricsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribePredictorBacktestExportJobOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3392,7 +3610,7 @@ func (m *awsAwsjson11_deserializeOpGetAccuracyMetrics) HandleDeserialize(ctx con return out, metadata, err } -func awsAwsjson11_deserializeOpErrorGetAccuracyMetrics(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribePredictorBacktestExportJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3436,9 +3654,6 @@ func awsAwsjson11_deserializeOpErrorGetAccuracyMetrics(response *smithyhttp.Resp case strings.EqualFold("InvalidInputException", errorCode): return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("ResourceInUseException", errorCode): - return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) @@ -3452,14 +3667,14 @@ func awsAwsjson11_deserializeOpErrorGetAccuracyMetrics(response *smithyhttp.Resp } } -type awsAwsjson11_deserializeOpListDatasetGroups struct { +type awsAwsjson11_deserializeOpGetAccuracyMetrics struct { } -func (*awsAwsjson11_deserializeOpListDatasetGroups) ID() string { +func (*awsAwsjson11_deserializeOpGetAccuracyMetrics) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListDatasetGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpGetAccuracyMetrics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3473,9 +3688,9 @@ func (m *awsAwsjson11_deserializeOpListDatasetGroups) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListDatasetGroups(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorGetAccuracyMetrics(response, &metadata) } - output := &ListDatasetGroupsOutput{} + output := &GetAccuracyMetricsOutput{} out.Result = output var buff [1024]byte @@ -3495,7 +3710,7 @@ func (m *awsAwsjson11_deserializeOpListDatasetGroups) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListDatasetGroupsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentGetAccuracyMetricsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3509,7 +3724,7 @@ func (m *awsAwsjson11_deserializeOpListDatasetGroups) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListDatasetGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorGetAccuracyMetrics(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3550,8 +3765,14 @@ func awsAwsjson11_deserializeOpErrorListDatasetGroups(response *smithyhttp.Respo } switch { - case strings.EqualFold("InvalidNextTokenException", errorCode): - return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) + case strings.EqualFold("InvalidInputException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -3563,14 +3784,14 @@ func awsAwsjson11_deserializeOpErrorListDatasetGroups(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpListDatasetImportJobs struct { +type awsAwsjson11_deserializeOpListDatasetGroups struct { } -func (*awsAwsjson11_deserializeOpListDatasetImportJobs) ID() string { +func (*awsAwsjson11_deserializeOpListDatasetGroups) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListDatasetImportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListDatasetGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3584,9 +3805,9 @@ func (m *awsAwsjson11_deserializeOpListDatasetImportJobs) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListDatasetImportJobs(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListDatasetGroups(response, &metadata) } - output := &ListDatasetImportJobsOutput{} + output := &ListDatasetGroupsOutput{} out.Result = output var buff [1024]byte @@ -3606,7 +3827,7 @@ func (m *awsAwsjson11_deserializeOpListDatasetImportJobs) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListDatasetImportJobsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListDatasetGroupsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3620,7 +3841,7 @@ func (m *awsAwsjson11_deserializeOpListDatasetImportJobs) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListDatasetImportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListDatasetGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3661,9 +3882,6 @@ func awsAwsjson11_deserializeOpErrorListDatasetImportJobs(response *smithyhttp.R } switch { - case strings.EqualFold("InvalidInputException", errorCode): - return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("InvalidNextTokenException", errorCode): return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) @@ -3677,14 +3895,14 @@ func awsAwsjson11_deserializeOpErrorListDatasetImportJobs(response *smithyhttp.R } } -type awsAwsjson11_deserializeOpListDatasets struct { +type awsAwsjson11_deserializeOpListDatasetImportJobs struct { } -func (*awsAwsjson11_deserializeOpListDatasets) ID() string { +func (*awsAwsjson11_deserializeOpListDatasetImportJobs) ID() string { return "OperationDeserializer" } -func (m 
*awsAwsjson11_deserializeOpListDatasets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListDatasetImportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3698,9 +3916,9 @@ func (m *awsAwsjson11_deserializeOpListDatasets) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListDatasets(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListDatasetImportJobs(response, &metadata) } - output := &ListDatasetsOutput{} + output := &ListDatasetImportJobsOutput{} out.Result = output var buff [1024]byte @@ -3720,7 +3938,7 @@ func (m *awsAwsjson11_deserializeOpListDatasets) HandleDeserialize(ctx context.C return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListDatasetsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListDatasetImportJobsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3734,7 +3952,7 @@ func (m *awsAwsjson11_deserializeOpListDatasets) HandleDeserialize(ctx context.C return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListDatasets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListDatasetImportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3775,6 +3993,9 @@ func awsAwsjson11_deserializeOpErrorListDatasets(response *smithyhttp.Response, } switch { + case strings.EqualFold("InvalidInputException", errorCode): + return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + case strings.EqualFold("InvalidNextTokenException", errorCode): return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) @@ -3788,14 +4009,14 @@ func awsAwsjson11_deserializeOpErrorListDatasets(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpListExplainabilities struct { +type awsAwsjson11_deserializeOpListDatasets struct { } -func (*awsAwsjson11_deserializeOpListExplainabilities) ID() string { +func (*awsAwsjson11_deserializeOpListDatasets) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListExplainabilities) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListDatasets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3809,9 +4030,9 @@ func (m *awsAwsjson11_deserializeOpListExplainabilities) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListExplainabilities(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListDatasets(response, &metadata) } - output := &ListExplainabilitiesOutput{} + output := &ListDatasetsOutput{} 
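+	// Like the other operation deserializers in this file, the response
+	// body below is read through an io.TeeReader backed by a 1 KiB
+	// smithyio ring buffer, so that a snapshot of the most recent bytes
+	// can be attached to any smithy.DeserializationError it reports.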
out.Result = output var buff [1024]byte @@ -3831,7 +4052,7 @@ func (m *awsAwsjson11_deserializeOpListExplainabilities) HandleDeserialize(ctx c return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListExplainabilitiesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListDatasetsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3845,7 +4066,7 @@ func (m *awsAwsjson11_deserializeOpListExplainabilities) HandleDeserialize(ctx c return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListExplainabilities(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListDatasets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3886,9 +4107,6 @@ func awsAwsjson11_deserializeOpErrorListExplainabilities(response *smithyhttp.Re } switch { - case strings.EqualFold("InvalidInputException", errorCode): - return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("InvalidNextTokenException", errorCode): return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) @@ -3902,14 +4120,14 @@ func awsAwsjson11_deserializeOpErrorListExplainabilities(response *smithyhttp.Re } } -type awsAwsjson11_deserializeOpListExplainabilityExports struct { +type awsAwsjson11_deserializeOpListExplainabilities struct { } -func (*awsAwsjson11_deserializeOpListExplainabilityExports) ID() string { +func (*awsAwsjson11_deserializeOpListExplainabilities) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListExplainabilityExports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListExplainabilities) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3923,9 +4141,9 @@ func (m *awsAwsjson11_deserializeOpListExplainabilityExports) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListExplainabilityExports(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListExplainabilities(response, &metadata) } - output := &ListExplainabilityExportsOutput{} + output := &ListExplainabilitiesOutput{} out.Result = output var buff [1024]byte @@ -3945,7 +4163,7 @@ func (m *awsAwsjson11_deserializeOpListExplainabilityExports) HandleDeserialize( return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListExplainabilityExportsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListExplainabilitiesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3959,7 +4177,7 @@ func (m *awsAwsjson11_deserializeOpListExplainabilityExports) HandleDeserialize( return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListExplainabilityExports(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListExplainabilities(response *smithyhttp.Response, metadata *middleware.Metadata) error { var 
errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4016,14 +4234,14 @@ func awsAwsjson11_deserializeOpErrorListExplainabilityExports(response *smithyht } } -type awsAwsjson11_deserializeOpListForecastExportJobs struct { +type awsAwsjson11_deserializeOpListExplainabilityExports struct { } -func (*awsAwsjson11_deserializeOpListForecastExportJobs) ID() string { +func (*awsAwsjson11_deserializeOpListExplainabilityExports) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListForecastExportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListExplainabilityExports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4037,9 +4255,9 @@ func (m *awsAwsjson11_deserializeOpListForecastExportJobs) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListForecastExportJobs(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListExplainabilityExports(response, &metadata) } - output := &ListForecastExportJobsOutput{} + output := &ListExplainabilityExportsOutput{} out.Result = output var buff [1024]byte @@ -4059,7 +4277,7 @@ func (m *awsAwsjson11_deserializeOpListForecastExportJobs) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListForecastExportJobsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListExplainabilityExportsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4073,7 +4291,7 @@ func (m *awsAwsjson11_deserializeOpListForecastExportJobs) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListForecastExportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListExplainabilityExports(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4130,14 +4348,14 @@ func awsAwsjson11_deserializeOpErrorListForecastExportJobs(response *smithyhttp. } } -type awsAwsjson11_deserializeOpListForecasts struct { +type awsAwsjson11_deserializeOpListForecastExportJobs struct { } -func (*awsAwsjson11_deserializeOpListForecasts) ID() string { +func (*awsAwsjson11_deserializeOpListForecastExportJobs) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListForecasts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListForecastExportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4151,9 +4369,9 @@ func (m *awsAwsjson11_deserializeOpListForecasts) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListForecasts(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListForecastExportJobs(response, &metadata) } - output := &ListForecastsOutput{} + output := &ListForecastExportJobsOutput{} out.Result = output var buff [1024]byte @@ -4173,7 +4391,7 @@ func (m *awsAwsjson11_deserializeOpListForecasts) HandleDeserialize(ctx context. return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListForecastsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListForecastExportJobsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4187,7 +4405,7 @@ func (m *awsAwsjson11_deserializeOpListForecasts) HandleDeserialize(ctx context. return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListForecasts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListForecastExportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4244,14 +4462,14 @@ func awsAwsjson11_deserializeOpErrorListForecasts(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpListPredictorBacktestExportJobs struct { +type awsAwsjson11_deserializeOpListForecasts struct { } -func (*awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) ID() string { +func (*awsAwsjson11_deserializeOpListForecasts) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListForecasts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4265,9 +4483,9 @@ func (m *awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListPredictorBacktestExportJobs(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListForecasts(response, &metadata) } - output := &ListPredictorBacktestExportJobsOutput{} + output := &ListForecastsOutput{} out.Result = output var buff [1024]byte @@ -4287,7 +4505,7 @@ func (m *awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) HandleDeseri return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListPredictorBacktestExportJobsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListForecastsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4301,7 +4519,7 @@ func (m *awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) HandleDeseri return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListPredictorBacktestExportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListForecasts(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != 
nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4358,14 +4576,14 @@ func awsAwsjson11_deserializeOpErrorListPredictorBacktestExportJobs(response *sm } } -type awsAwsjson11_deserializeOpListPredictors struct { +type awsAwsjson11_deserializeOpListMonitorEvaluations struct { } -func (*awsAwsjson11_deserializeOpListPredictors) ID() string { +func (*awsAwsjson11_deserializeOpListMonitorEvaluations) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListPredictors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListMonitorEvaluations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4379,9 +4597,9 @@ func (m *awsAwsjson11_deserializeOpListPredictors) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListPredictors(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListMonitorEvaluations(response, &metadata) } - output := &ListPredictorsOutput{} + output := &ListMonitorEvaluationsOutput{} out.Result = output var buff [1024]byte @@ -4401,7 +4619,7 @@ func (m *awsAwsjson11_deserializeOpListPredictors) HandleDeserialize(ctx context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListPredictorsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListMonitorEvaluationsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4415,7 +4633,7 @@ func (m *awsAwsjson11_deserializeOpListPredictors) HandleDeserialize(ctx context return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListPredictors(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListMonitorEvaluations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4462,6 +4680,9 @@ func awsAwsjson11_deserializeOpErrorListPredictors(response *smithyhttp.Response case strings.EqualFold("InvalidNextTokenException", errorCode): return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -4472,14 +4693,14 @@ func awsAwsjson11_deserializeOpErrorListPredictors(response *smithyhttp.Response } } -type awsAwsjson11_deserializeOpListTagsForResource struct { +type awsAwsjson11_deserializeOpListMonitors struct { } -func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { +func (*awsAwsjson11_deserializeOpListMonitors) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListMonitors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4493,9 +4714,9 @@ func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListMonitors(response, &metadata) } - output := &ListTagsForResourceOutput{} + output := &ListMonitorsOutput{} out.Result = output var buff [1024]byte @@ -4515,7 +4736,7 @@ func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListMonitorsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4529,7 +4750,7 @@ func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListMonitors(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4573,8 +4794,8 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res case strings.EqualFold("InvalidInputException", errorCode): return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("InvalidNextTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -4586,14 +4807,14 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpStopResource struct { +type awsAwsjson11_deserializeOpListPredictorBacktestExportJobs struct { } -func (*awsAwsjson11_deserializeOpStopResource) ID() string { +func (*awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpStopResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListPredictorBacktestExportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4607,21 +4828,43 @@ func (m *awsAwsjson11_deserializeOpStopResource) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStopResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListPredictorBacktestExportJobs(response, &metadata) } - output := &StopResourceOutput{} + output := &ListPredictorBacktestExportJobsOutput{} out.Result = output - if _, err = 
io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListPredictorBacktestExportJobsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorStopResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListPredictorBacktestExportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4665,11 +4908,8 @@ func awsAwsjson11_deserializeOpErrorStopResource(response *smithyhttp.Response, case strings.EqualFold("InvalidInputException", errorCode): return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("InvalidNextTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -4681,14 +4921,14 @@ func awsAwsjson11_deserializeOpErrorStopResource(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpTagResource struct { +type awsAwsjson11_deserializeOpListPredictors struct { } -func (*awsAwsjson11_deserializeOpTagResource) ID() string { +func (*awsAwsjson11_deserializeOpListPredictors) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListPredictors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4702,9 +4942,9 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListPredictors(response, &metadata) } - output := &TagResourceOutput{} + output := &ListPredictorsOutput{} out.Result = output var buff [1024]byte @@ -4724,7 +4964,7 @@ func (m 
*awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListPredictorsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4738,7 +4978,7 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListPredictors(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4782,11 +5022,8 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m case strings.EqualFold("InvalidInputException", errorCode): return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("LimitExceededException", errorCode): - return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("InvalidNextTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidNextTokenException(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -4798,14 +5035,14 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m } } -type awsAwsjson11_deserializeOpUntagResource struct { +type awsAwsjson11_deserializeOpListTagsForResource struct { } -func (*awsAwsjson11_deserializeOpUntagResource) ID() string { +func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4819,9 +5056,9 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) } - output := &UntagResourceOutput{} + output := &ListTagsForResourceOutput{} out.Result = output var buff [1024]byte @@ -4841,7 +5078,7 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4855,7 +5092,7 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4912,14 +5149,14 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpUpdateDatasetGroup struct { +type awsAwsjson11_deserializeOpResumeResource struct { } -func (*awsAwsjson11_deserializeOpUpdateDatasetGroup) ID() string { +func (*awsAwsjson11_deserializeOpResumeResource) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpUpdateDatasetGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpResumeResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4933,43 +5170,21 @@ func (m *awsAwsjson11_deserializeOpUpdateDatasetGroup) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorUpdateDatasetGroup(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorResumeResource(response, &metadata) } - output := &UpdateDatasetGroupOutput{} + output := &ResumeResourceOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentUpdateDatasetGroupOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorUpdateDatasetGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorResumeResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -5013,7 +5228,453 @@ func awsAwsjson11_deserializeOpErrorUpdateDatasetGroup(response *smithyhttp.Resp case strings.EqualFold("InvalidInputException", errorCode): return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) - case strings.EqualFold("ResourceInUseException", errorCode): + 
case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStopResource struct { +} + +func (*awsAwsjson11_deserializeOpStopResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStopResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStopResource(response, &metadata) + } + output := &StopResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStopResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidInputException", errorCode): + return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + 
+func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidInputException", errorCode): + return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func 
(*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidInputException", errorCode): + return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateDatasetGroup struct { +} + +func (*awsAwsjson11_deserializeOpUpdateDatasetGroup) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsjson11_deserializeOpUpdateDatasetGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateDatasetGroup(response, &metadata) + } + output := &UpdateDatasetGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateDatasetGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateDatasetGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidInputException", errorCode): + return awsAwsjson11_deserializeErrorInvalidInputException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) case strings.EqualFold("ResourceNotFoundException", errorCode): @@ -5345,16 +6006,131 @@ func awsAwsjson11_deserializeDocumentArnList(v *[]string, value interface{}) err if !ok { return fmt.Errorf("expected Arn to be of type string, got %T instead", value) } - col = jtv - } - cv = append(cv, col) + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAttributeConfig(v 
**types.AttributeConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AttributeConfig + if *v == nil { + sv = &types.AttributeConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttributeName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Name to be of type string, got %T instead", value) + } + sv.AttributeName = ptr.String(jtv) + } + + case "Transformations": + if err := awsAwsjson11_deserializeDocumentTransformations(&sv.Transformations, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAttributeConfigs(v *[]types.AttributeConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AttributeConfig + if *v == nil { + cv = []types.AttributeConfig{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AttributeConfig + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAttributeConfig(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentBaseline(v **types.Baseline, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Baseline + if *v == nil { + sv = &types.Baseline{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PredictorBaseline": + if err := awsAwsjson11_deserializeDocumentPredictorBaseline(&sv.PredictorBaseline, value); err != nil { + return err + } + + default: + _, _ = key, value + } } - *v = cv + *v = sv return nil } -func awsAwsjson11_deserializeDocumentAttributeConfig(v **types.AttributeConfig, value interface{}) error { +func awsAwsjson11_deserializeDocumentBaselineMetric(v **types.BaselineMetric, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -5367,27 +6143,56 @@ func awsAwsjson11_deserializeDocumentAttributeConfig(v **types.AttributeConfig, return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.AttributeConfig + var sv *types.BaselineMetric if *v == nil { - sv = &types.AttributeConfig{} + sv = &types.BaselineMetric{} } else { sv = *v } for key, value := range shape { switch key { - case "AttributeName": + case "Name": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected Name to be of type string, got %T instead", value) } - sv.AttributeName = ptr.String(jtv) + sv.Name = ptr.String(jtv) } - case "Transformations": - if err := awsAwsjson11_deserializeDocumentTransformations(&sv.Transformations, value); err != nil { - return err + case "Value": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Value = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = 
math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.Value = ptr.Float64(f64) + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } } default: @@ -5399,7 +6204,7 @@ func awsAwsjson11_deserializeDocumentAttributeConfig(v **types.AttributeConfig, return nil } -func awsAwsjson11_deserializeDocumentAttributeConfigs(v *[]types.AttributeConfig, value interface{}) error { +func awsAwsjson11_deserializeDocumentBaselineMetrics(v *[]types.BaselineMetric, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -5412,17 +6217,17 @@ func awsAwsjson11_deserializeDocumentAttributeConfigs(v *[]types.AttributeConfig return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.AttributeConfig + var cv []types.BaselineMetric if *v == nil { - cv = []types.AttributeConfig{} + cv = []types.BaselineMetric{} } else { cv = *v } for _, value := range shape { - var col types.AttributeConfig + var col types.BaselineMetric destAddr := &col - if err := awsAwsjson11_deserializeDocumentAttributeConfig(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentBaselineMetric(&destAddr, value); err != nil { return err } col = *destAddr @@ -7733,16 +8538,244 @@ func awsAwsjson11_deserializeDocumentIntegerParameterRange(v **types.IntegerPara if !ok { return fmt.Errorf("expected Name to be of type string, got %T instead", value) } - sv.Name = ptr.String(jtv) + sv.Name = ptr.String(jtv) + } + + case "ScalingType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScalingType to be of type string, got %T instead", value) + } + sv.ScalingType = types.ScalingType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIntegerParameterRanges(v *[]types.IntegerParameterRange, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.IntegerParameterRange + if *v == nil { + cv = []types.IntegerParameterRange{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.IntegerParameterRange + destAddr := &col + if err := awsAwsjson11_deserializeDocumentIntegerParameterRange(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidInputException(v **types.InvalidInputException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidInputException + if *v == nil { + sv = &types.InvalidInputException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidNextTokenException(v 
**types.InvalidNextTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidNextTokenException + if *v == nil { + sv = &types.InvalidNextTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LimitExceededException + if *v == nil { + sv = &types.LimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMetricResult(v **types.MetricResult, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MetricResult + if *v == nil { + sv = &types.MetricResult{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MetricName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MetricName to be of type string, got %T instead", value) + } + sv.MetricName = ptr.String(jtv) } - case "ScalingType": + case "MetricValue": if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ScalingType to be of type string, got %T instead", value) + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.MetricValue = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.MetricValue = ptr.Float64(f64) + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + } - sv.ScalingType = types.ScalingType(jtv) } default: @@ -7754,7 +8787,7 @@ func awsAwsjson11_deserializeDocumentIntegerParameterRange(v **types.IntegerPara return nil } -func awsAwsjson11_deserializeDocumentIntegerParameterRanges(v *[]types.IntegerParameterRange, value interface{}) error { +func awsAwsjson11_deserializeDocumentMetricResults(v *[]types.MetricResult, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -7767,17 +8800,17 @@ func 
awsAwsjson11_deserializeDocumentIntegerParameterRanges(v *[]types.IntegerPa return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.IntegerParameterRange + var cv []types.MetricResult if *v == nil { - cv = []types.IntegerParameterRange{} + cv = []types.MetricResult{} } else { cv = *v } for _, value := range shape { - var col types.IntegerParameterRange + var col types.MetricResult destAddr := &col - if err := awsAwsjson11_deserializeDocumentIntegerParameterRange(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentMetricResult(&destAddr, value); err != nil { return err } col = *destAddr @@ -7788,7 +8821,7 @@ func awsAwsjson11_deserializeDocumentIntegerParameterRanges(v *[]types.IntegerPa return nil } -func awsAwsjson11_deserializeDocumentInvalidInputException(v **types.InvalidInputException, value interface{}) error { +func awsAwsjson11_deserializeDocumentMetrics(v **types.Metrics, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -7801,22 +8834,91 @@ func awsAwsjson11_deserializeDocumentInvalidInputException(v **types.InvalidInpu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.InvalidInputException + var sv *types.Metrics if *v == nil { - sv = &types.InvalidInputException{} + sv = &types.Metrics{} } else { sv = *v } for key, value := range shape { switch key { - case "Message": + case "AverageWeightedQuantileLoss": if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.AverageWeightedQuantileLoss = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.AverageWeightedQuantileLoss = ptr.Float64(f64) + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + } - sv.Message = ptr.String(jtv) + } + + case "ErrorMetrics": + if err := awsAwsjson11_deserializeDocumentErrorMetrics(&sv.ErrorMetrics, value); err != nil { + return err + } + + case "RMSE": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.RMSE = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.RMSE = ptr.Float64(f64) + + default: + return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + + } + } + + case "WeightedQuantileLosses": + if err := awsAwsjson11_deserializeDocumentWeightedQuantileLosses(&sv.WeightedQuantileLosses, value); err != nil { + return err } default: @@ -7828,7 +8930,7 @@ func awsAwsjson11_deserializeDocumentInvalidInputException(v **types.InvalidInpu return nil } -func awsAwsjson11_deserializeDocumentInvalidNextTokenException(v **types.InvalidNextTokenException, value interface{}) error { +func awsAwsjson11_deserializeDocumentMonitorDataSource(v **types.MonitorDataSource, value interface{}) error { if v 
== nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -7841,22 +8943,40 @@ func awsAwsjson11_deserializeDocumentInvalidNextTokenException(v **types.Invalid return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.InvalidNextTokenException + var sv *types.MonitorDataSource if *v == nil { - sv = &types.InvalidNextTokenException{} + sv = &types.MonitorDataSource{} } else { sv = *v } for key, value := range shape { switch key { - case "Message": + case "DatasetImportJobArn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.DatasetImportJobArn = ptr.String(jtv) + } + + case "ForecastArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.ForecastArn = ptr.String(jtv) + } + + case "PredictorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.PredictorArn = ptr.String(jtv) } default: @@ -7868,7 +8988,7 @@ func awsAwsjson11_deserializeDocumentInvalidNextTokenException(v **types.Invalid return nil } -func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { +func awsAwsjson11_deserializeDocumentMonitorInfo(v **types.MonitorInfo, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -7881,22 +9001,31 @@ func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExcee return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.LimitExceededException + var sv *types.MonitorInfo if *v == nil { - sv = &types.LimitExceededException{} + sv = &types.MonitorInfo{} } else { sv = *v } for key, value := range shape { switch key { - case "Message": + case "MonitorArn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.MonitorArn = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Status to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) } default: @@ -7908,7 +9037,41 @@ func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExcee return nil } -func awsAwsjson11_deserializeDocumentMetrics(v **types.Metrics, value interface{}) error { +func awsAwsjson11_deserializeDocumentMonitors(v *[]types.MonitorSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MonitorSummary + if *v == nil { + cv = []types.MonitorSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MonitorSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentMonitorSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentMonitorSummary(v **types.MonitorSummary, value interface{}) error { if v == 
nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -7921,16 +9084,16 @@ func awsAwsjson11_deserializeDocumentMetrics(v **types.Metrics, value interface{ return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.Metrics + var sv *types.MonitorSummary if *v == nil { - sv = &types.Metrics{} + sv = &types.MonitorSummary{} } else { sv = *v } for key, value := range shape { switch key { - case "AverageWeightedQuantileLoss": + case "CreationTime": if value != nil { switch jtv := value.(type) { case json.Number: @@ -7938,38 +9101,15 @@ func awsAwsjson11_deserializeDocumentMetrics(v **types.Metrics, value interface{ if err != nil { return err } - sv.AverageWeightedQuantileLoss = ptr.Float64(f64) - - case string: - var f64 float64 - switch { - case strings.EqualFold(jtv, "NaN"): - f64 = math.NaN() - - case strings.EqualFold(jtv, "Infinity"): - f64 = math.Inf(1) - - case strings.EqualFold(jtv, "-Infinity"): - f64 = math.Inf(-1) - - default: - return fmt.Errorf("unknown JSON number value: %s", jtv) - - } - sv.AverageWeightedQuantileLoss = ptr.Float64(f64) + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) default: - return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) } } - case "ErrorMetrics": - if err := awsAwsjson11_deserializeDocumentErrorMetrics(&sv.ErrorMetrics, value); err != nil { - return err - } - - case "RMSE": + case "LastModificationTime": if value != nil { switch jtv := value.(type) { case json.Number: @@ -7977,35 +9117,48 @@ func awsAwsjson11_deserializeDocumentMetrics(v **types.Metrics, value interface{ if err != nil { return err } - sv.RMSE = ptr.Float64(f64) - - case string: - var f64 float64 - switch { - case strings.EqualFold(jtv, "NaN"): - f64 = math.NaN() - - case strings.EqualFold(jtv, "Infinity"): - f64 = math.Inf(1) + sv.LastModificationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) - case strings.EqualFold(jtv, "-Infinity"): - f64 = math.Inf(-1) + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) - default: - return fmt.Errorf("unknown JSON number value: %s", jtv) + } + } - } - sv.RMSE = ptr.Float64(f64) + case "MonitorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.MonitorArn = ptr.String(jtv) + } - default: - return fmt.Errorf("expected Double to be a JSON Number, got %T instead", value) + case "MonitorName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Name to be of type string, got %T instead", value) + } + sv.MonitorName = ptr.String(jtv) + } + case "ResourceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) } + sv.ResourceArn = ptr.String(jtv) } - case "WeightedQuantileLosses": - if err := awsAwsjson11_deserializeDocumentWeightedQuantileLosses(&sv.WeightedQuantileLosses, value); err != nil { - return err + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Status to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) } default: @@ -8201,6 +9354,42 @@ func awsAwsjson11_deserializeDocumentPredictorBacktestExportJobSummary(v **types return nil } +func awsAwsjson11_deserializeDocumentPredictorBaseline(v **types.PredictorBaseline, value interface{}) error { + if v == 
nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PredictorBaseline + if *v == nil { + sv = &types.PredictorBaseline{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BaselineMetrics": + if err := awsAwsjson11_deserializeDocumentBaselineMetrics(&sv.BaselineMetrics, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentPredictorEvaluationResults(v *[]types.EvaluationResult, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8235,6 +9424,62 @@ func awsAwsjson11_deserializeDocumentPredictorEvaluationResults(v *[]types.Evalu return nil } +func awsAwsjson11_deserializeDocumentPredictorEvent(v **types.PredictorEvent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PredictorEvent + if *v == nil { + sv = &types.PredictorEvent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Datetime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Datetime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "Detail": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Detail to be of type string, got %T instead", value) + } + sv.Detail = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentPredictorExecution(v **types.PredictorExecution, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8331,15 +9576,192 @@ func awsAwsjson11_deserializeDocumentPredictorExecutions(v *[]types.PredictorExe var cv []types.PredictorExecution if *v == nil { - cv = []types.PredictorExecution{} + cv = []types.PredictorExecution{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PredictorExecution + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPredictorExecution(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPredictorMonitorEvaluation(v **types.PredictorMonitorEvaluation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PredictorMonitorEvaluation + if *v == nil { + sv = &types.PredictorMonitorEvaluation{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EvaluationState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EvaluationState to be of type string, got %T instead", value) + } + sv.EvaluationState = ptr.String(jtv) + } + + case "EvaluationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := 
jtv.Float64() + if err != nil { + return err + } + sv.EvaluationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Message to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "MetricResults": + if err := awsAwsjson11_deserializeDocumentMetricResults(&sv.MetricResults, value); err != nil { + return err + } + + case "MonitorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.MonitorArn = ptr.String(jtv) + } + + case "MonitorDataSource": + if err := awsAwsjson11_deserializeDocumentMonitorDataSource(&sv.MonitorDataSource, value); err != nil { + return err + } + + case "NumItemsEvaluated": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumItemsEvaluated = ptr.Int64(i64) + } + + case "PredictorEvent": + if err := awsAwsjson11_deserializeDocumentPredictorEvent(&sv.PredictorEvent, value); err != nil { + return err + } + + case "ResourceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.ResourceArn = ptr.String(jtv) + } + + case "WindowEndDatetime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.WindowEndDatetime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "WindowStartDatetime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.WindowStartDatetime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPredictorMonitorEvaluations(v *[]types.PredictorMonitorEvaluation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PredictorMonitorEvaluation + if *v == nil { + cv = []types.PredictorMonitorEvaluation{} } else { cv = *v } for _, value := range shape { - var col types.PredictorExecution + var col types.PredictorMonitorEvaluation destAddr := &col - if err := awsAwsjson11_deserializeDocumentPredictorExecution(&destAddr, value); err != nil { + if err := awsAwsjson11_deserializeDocumentPredictorMonitorEvaluation(&destAddr, value); err != nil { return err } col = *destAddr @@ -10039,6 +11461,46 @@ func awsAwsjson11_deserializeOpDocumentCreateForecastOutput(v **CreateForecastOu return nil } +func awsAwsjson11_deserializeOpDocumentCreateMonitorOutput(v **CreateMonitorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateMonitorOutput + if *v == nil { + sv = &CreateMonitorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MonitorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.MonitorArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentCreatePredictorBacktestExportJobOutput(v **CreatePredictorBacktestExportJobOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -10247,6 +11709,11 @@ func awsAwsjson11_deserializeOpDocumentDescribeAutoPredictorOutput(v **DescribeA sv.Message = ptr.String(jtv) } + case "MonitorInfo": + if err := awsAwsjson11_deserializeDocumentMonitorInfo(&sv.MonitorInfo, value); err != nil { + return err + } + case "OptimizationMetric": if value != nil { jtv, ok := value.(string) @@ -11253,6 +12720,157 @@ func awsAwsjson11_deserializeOpDocumentDescribeForecastOutput(v **DescribeForeca return nil } +func awsAwsjson11_deserializeOpDocumentDescribeMonitorOutput(v **DescribeMonitorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeMonitorOutput + if *v == nil { + sv = &DescribeMonitorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Baseline": + if err := awsAwsjson11_deserializeDocumentBaseline(&sv.Baseline, value); err != nil { + return err + } + + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "EstimatedEvaluationTimeRemainingInMinutes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.EstimatedEvaluationTimeRemainingInMinutes = ptr.Int64(i64) + } + + case "LastEvaluationState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EvaluationState to be of type string, got %T instead", value) + } + sv.LastEvaluationState = ptr.String(jtv) + } + + case "LastEvaluationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastEvaluationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "LastModificationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModificationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Message to be of type string, got %T instead", value) + } + sv.Message = 
ptr.String(jtv) + } + + case "MonitorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.MonitorArn = ptr.String(jtv) + } + + case "MonitorName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Name to be of type string, got %T instead", value) + } + sv.MonitorName = ptr.String(jtv) + } + + case "ResourceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.ResourceArn = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Status to be of type string, got %T instead", value) + } + sv.Status = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentDescribePredictorBacktestExportJobOutput(v **DescribePredictorBacktestExportJobOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -11973,6 +13591,96 @@ func awsAwsjson11_deserializeOpDocumentListForecastsOutput(v **ListForecastsOutp return nil } +func awsAwsjson11_deserializeOpDocumentListMonitorEvaluationsOutput(v **ListMonitorEvaluationsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListMonitorEvaluationsOutput + if *v == nil { + sv = &ListMonitorEvaluationsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "PredictorMonitorEvaluations": + if err := awsAwsjson11_deserializeDocumentPredictorMonitorEvaluations(&sv.PredictorMonitorEvaluations, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListMonitorsOutput(v **ListMonitorsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListMonitorsOutput + if *v == nil { + sv = &ListMonitorsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Monitors": + if err := awsAwsjson11_deserializeDocumentMonitors(&sv.Monitors, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentListPredictorBacktestExportJobsOutput(v **ListPredictorBacktestExportJobsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/forecast/generated.json b/service/forecast/generated.json index bacd4c5f6da..8e8d78e7965 100644 --- a/service/forecast/generated.json +++ b/service/forecast/generated.json @@ -16,6 +16,7 @@ 
"api_op_CreateExplainabilityExport.go", "api_op_CreateForecast.go", "api_op_CreateForecastExportJob.go", + "api_op_CreateMonitor.go", "api_op_CreatePredictor.go", "api_op_CreatePredictorBacktestExportJob.go", "api_op_DeleteDataset.go", @@ -25,6 +26,7 @@ "api_op_DeleteExplainabilityExport.go", "api_op_DeleteForecast.go", "api_op_DeleteForecastExportJob.go", + "api_op_DeleteMonitor.go", "api_op_DeletePredictor.go", "api_op_DeletePredictorBacktestExportJob.go", "api_op_DeleteResourceTree.go", @@ -36,6 +38,7 @@ "api_op_DescribeExplainabilityExport.go", "api_op_DescribeForecast.go", "api_op_DescribeForecastExportJob.go", + "api_op_DescribeMonitor.go", "api_op_DescribePredictor.go", "api_op_DescribePredictorBacktestExportJob.go", "api_op_GetAccuracyMetrics.go", @@ -46,9 +49,12 @@ "api_op_ListExplainabilityExports.go", "api_op_ListForecastExportJobs.go", "api_op_ListForecasts.go", + "api_op_ListMonitorEvaluations.go", + "api_op_ListMonitors.go", "api_op_ListPredictorBacktestExportJobs.go", "api_op_ListPredictors.go", "api_op_ListTagsForResource.go", + "api_op_ResumeResource.go", "api_op_StopResource.go", "api_op_TagResource.go", "api_op_UntagResource.go", diff --git a/service/forecast/serializers.go b/service/forecast/serializers.go index db402d48afd..0fe1ecd19dc 100644 --- a/service/forecast/serializers.go +++ b/service/forecast/serializers.go @@ -456,6 +456,61 @@ func (m *awsAwsjson11_serializeOpCreateForecastExportJob) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpCreateMonitor struct { +} + +func (*awsAwsjson11_serializeOpCreateMonitor) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateMonitor) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateMonitorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonForecast.CreateMonitor") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateMonitorInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return 
next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpCreatePredictor struct { } @@ -951,6 +1006,61 @@ func (m *awsAwsjson11_serializeOpDeleteForecastExportJob) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDeleteMonitor struct { +} + +func (*awsAwsjson11_serializeOpDeleteMonitor) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteMonitor) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteMonitorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonForecast.DeleteMonitor") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteMonitorInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDeletePredictor struct { } @@ -1556,6 +1666,61 @@ func (m *awsAwsjson11_serializeOpDescribeForecastExportJob) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDescribeMonitor struct { +} + +func (*awsAwsjson11_serializeOpDescribeMonitor) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeMonitor) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeMonitorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' 
{ + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonForecast.DescribeMonitor") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeMonitorInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDescribePredictor struct { } @@ -2106,6 +2271,116 @@ func (m *awsAwsjson11_serializeOpListForecasts) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpListMonitorEvaluations struct { +} + +func (*awsAwsjson11_serializeOpListMonitorEvaluations) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListMonitorEvaluations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMonitorEvaluationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonForecast.ListMonitorEvaluations") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListMonitorEvaluationsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListMonitors struct { +} + +func (*awsAwsjson11_serializeOpListMonitors) ID() string { + return "OperationSerializer" +} + +func (m 
*awsAwsjson11_serializeOpListMonitors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMonitorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonForecast.ListMonitors") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListMonitorsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpListPredictorBacktestExportJobs struct { } @@ -2271,6 +2546,61 @@ func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpResumeResource struct { +} + +func (*awsAwsjson11_serializeOpResumeResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpResumeResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ResumeResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonForecast.ResumeResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentResumeResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpStopResource struct { } @@ -3015,6 +3345,18 @@ func awsAwsjson11_serializeDocumentIntegerParameterRanges(v []types.IntegerParam return nil } +func awsAwsjson11_serializeDocumentMonitorConfig(v *types.MonitorConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MonitorName != nil { + ok := object.Key("MonitorName") + ok.String(*v.MonitorName) + } + + return nil +} + func awsAwsjson11_serializeDocumentParameterRanges(v *types.ParameterRanges, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3260,6 +3602,13 @@ func awsAwsjson11_serializeOpDocumentCreateAutoPredictorInput(v *CreateAutoPredi } } + if v.MonitorConfig != nil { + ok := object.Key("MonitorConfig") + if err := awsAwsjson11_serializeDocumentMonitorConfig(v.MonitorConfig, ok); err != nil { + return err + } + } + if len(v.OptimizationMetric) > 0 { ok := object.Key("OptimizationMetric") ok.String(string(v.OptimizationMetric)) @@ -3568,6 +3917,30 @@ func awsAwsjson11_serializeOpDocumentCreateForecastInput(v *CreateForecastInput, return nil } +func awsAwsjson11_serializeOpDocumentCreateMonitorInput(v *CreateMonitorInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MonitorName != nil { + ok := object.Key("MonitorName") + ok.String(*v.MonitorName) + } + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTags(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeOpDocumentCreatePredictorBacktestExportJobInput(v *CreatePredictorBacktestExportJobInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3781,6 +4154,18 @@ func awsAwsjson11_serializeOpDocumentDeleteForecastInput(v *DeleteForecastInput, return nil } +func awsAwsjson11_serializeOpDocumentDeleteMonitorInput(v *DeleteMonitorInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MonitorArn != nil { + ok := object.Key("MonitorArn") + ok.String(*v.MonitorArn) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentDeletePredictorBacktestExportJobInput(v *DeletePredictorBacktestExportJobInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3913,6 +4298,18 @@ func awsAwsjson11_serializeOpDocumentDescribeForecastInput(v *DescribeForecastIn return nil } +func awsAwsjson11_serializeOpDocumentDescribeMonitorInput(v *DescribeMonitorInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MonitorArn != nil { + ok := object.Key("MonitorArn") + ok.String(*v.MonitorArn) + } + + return nil +} + func 
awsAwsjson11_serializeOpDocumentDescribePredictorBacktestExportJobInput(v *DescribePredictorBacktestExportJobInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4103,6 +4500,59 @@ func awsAwsjson11_serializeOpDocumentListForecastsInput(v *ListForecastsInput, v return nil } +func awsAwsjson11_serializeOpDocumentListMonitorEvaluationsInput(v *ListMonitorEvaluationsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsAwsjson11_serializeDocumentFilters(v.Filters, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.MonitorArn != nil { + ok := object.Key("MonitorArn") + ok.String(*v.MonitorArn) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListMonitorsInput(v *ListMonitorsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsAwsjson11_serializeDocumentFilters(v.Filters, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentListPredictorBacktestExportJobsInput(v *ListPredictorBacktestExportJobsInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4163,6 +4613,18 @@ func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForReso return nil } +func awsAwsjson11_serializeOpDocumentResumeResourceInput(v *ResumeResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentStopResourceInput(v *StopResourceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/forecast/types/types.go b/service/forecast/types/types.go index 66c738c416b..c397ec52585 100644 --- a/service/forecast/types/types.go +++ b/service/forecast/types/types.go @@ -28,8 +28,8 @@ type AdditionalDataset struct { Name *string // Weather Index To enable the Weather Index, do not specify a value for - // Configuration. Holidays To enable Holidays, set CountryCode to one of the - // following two-letter country codes: + // Configuration. Holidays To enable Holidays, set CountryCode to one of + // the following two-letter country codes: // // * "AL" - ALBANIA // @@ -240,6 +240,32 @@ type AttributeConfig struct { noSmithyDocumentSerde } +// Metrics you can use as a baseline for comparison purposes. Use these metrics +// when you interpret monitoring results for an auto predictor. +type Baseline struct { + + // The initial accuracy metrics + // (https://docs.aws.amazon.com/forecast/latest/dg/metrics.html) for the predictor + // you are monitoring. Use these metrics as a baseline for comparison purposes as + // you use your predictor and the metrics change. + PredictorBaseline *PredictorBaseline + + noSmithyDocumentSerde +} + +// An individual metric that you can use for comparison as you evaluate your +// monitoring results. +type BaselineMetric struct { + + // The name of the metric. 
+ Name *string + + // The value for the metric. + Value *float64 + + noSmithyDocumentSerde +} + // Specifies a categorical hyperparameter and it's range of tunable values. This // object is part of the ParameterRanges object. type CategoricalParameterRange struct { @@ -326,7 +352,9 @@ type DataDestination struct { } // Provides a summary of the dataset group properties used in the ListDatasetGroups +// (https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetGroups.html) // operation. To get the complete set of properties, call the DescribeDatasetGroup +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetGroup.html) // operation, and provide the DatasetGroupArn. type DatasetGroupSummary struct { @@ -340,16 +368,22 @@ type DatasetGroupSummary struct { DatasetGroupName *string // When the dataset group was created or last updated from a call to the - // UpdateDatasetGroup operation. While the dataset group is being updated, - // LastModificationTime is the current time of the ListDatasetGroups call. + // UpdateDatasetGroup + // (https://docs.aws.amazon.com/forecast/latest/dg/API_UpdateDatasetGroup.html) + // operation. While the dataset group is being updated, LastModificationTime is the + // current time of the ListDatasetGroups call. LastModificationTime *time.Time noSmithyDocumentSerde } // Provides a summary of the dataset import job properties used in the -// ListDatasetImportJobs operation. To get the complete set of properties, call the -// DescribeDatasetImportJob operation, and provide the DatasetImportJobArn. +// ListDatasetImportJobs +// (https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasetImportJobs.html) +// operation. To get the complete set of properties, call the +// DescribeDatasetImportJob +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDatasetImportJob.html) +// operation, and provide the DatasetImportJobArn. type DatasetImportJobSummary struct { // When the dataset import job was created. @@ -403,9 +437,11 @@ type DatasetImportJobSummary struct { noSmithyDocumentSerde } -// Provides a summary of the dataset properties used in the ListDatasets operation. -// To get the complete set of properties, call the DescribeDataset operation, and -// provide the DatasetArn. +// Provides a summary of the dataset properties used in the ListDatasets +// (https://docs.aws.amazon.com/forecast/latest/dg/API_ListDatasets.html) +// operation. To get the complete set of properties, call the DescribeDataset +// (https://docs.aws.amazon.com/forecast/latest/dg/API_DescribeDataset.html) +// operation, and provide the DatasetArn. type DatasetSummary struct { // When the dataset was created. @@ -425,8 +461,10 @@ type DatasetSummary struct { // When you create a dataset, LastModificationTime is the same as CreationTime. // While data is being imported to the dataset, LastModificationTime is the current - // time of the ListDatasets call. After a CreateDatasetImportJob operation has - // finished, LastModificationTime is when the import job completed or failed. + // time of the ListDatasets call. After a CreateDatasetImportJob + // (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html) + // operation has finished, LastModificationTime is when the import job completed or + // failed. LastModificationTime *time.Time noSmithyDocumentSerde @@ -1044,6 +1082,22 @@ type IntegerParameterRange struct { noSmithyDocumentSerde } +// An individual metric Forecast calculated when monitoring predictor usage. 
You +// can compare the value for this metric to the metric's value in the Baseline to +// see how your predictor's performance is changing. For more information about +// metrics generated by Forecast see Evaluating Predictor Accuracy +// (https://docs.aws.amazon.com/forecast/latest/dg/metrics.html) +type MetricResult struct { + + // The name of the metric. + MetricName *string + + // The value for the metric. + MetricValue *float64 + + noSmithyDocumentSerde +} + // Provides metrics that are used to evaluate the performance of a predictor. This // object is part of the WindowSummary object. type Metrics struct { @@ -1070,6 +1124,107 @@ type Metrics struct { noSmithyDocumentSerde } +// The configuration details for the predictor monitor. +type MonitorConfig struct { + + // The name of the monitor resource. + // + // This member is required. + MonitorName *string + + noSmithyDocumentSerde +} + +// The source of the data the monitor used during the evaluation. +type MonitorDataSource struct { + + // The Amazon Resource Name (ARN) of the dataset import job used to import the data + // that initiated the monitor evaluation. + DatasetImportJobArn *string + + // The Amazon Resource Name (ARN) of the forecast the monitor used during the + // evaluation. + ForecastArn *string + + // The Amazon Resource Name (ARN) of the predictor resource you are monitoring. + PredictorArn *string + + noSmithyDocumentSerde +} + +// Provides information about the monitor resource. +type MonitorInfo struct { + + // The Amazon Resource Name (ARN) of the monitor resource. + MonitorArn *string + + // The status of the monitor. States include: + // + // * ACTIVE + // + // * ACTIVE_STOPPING, + // ACTIVE_STOPPED + // + // * UPDATE_IN_PROGRESS + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, + // CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + Status *string + + noSmithyDocumentSerde +} + +// Provides a summary of the monitor properties used in the ListMonitors operation. +// To get a complete set of properties, call the DescribeMonitor operation, and +// provide the listed MonitorArn. +type MonitorSummary struct { + + // When the monitor resource was created. + CreationTime *time.Time + + // The last time the monitor resource was modified. The timestamp depends on the + // status of the job: + // + // * CREATE_PENDING - The CreationTime. + // + // * CREATE_IN_PROGRESS - + // The current timestamp. + // + // * STOPPED - When the resource stopped. + // + // * ACTIVE or + // CREATE_FAILED - When the monitor creation finished or failed. + LastModificationTime *time.Time + + // The Amazon Resource Name (ARN) of the monitor resource. + MonitorArn *string + + // The name of the monitor resource. + MonitorName *string + + // The Amazon Resource Name (ARN) of the predictor being monitored. + ResourceArn *string + + // The status of the monitor. States include: + // + // * ACTIVE + // + // * ACTIVE_STOPPING, + // ACTIVE_STOPPED + // + // * UPDATE_IN_PROGRESS + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, + // CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + Status *string + + noSmithyDocumentSerde +} + // Specifies the categorical, continuous, and integer hyperparameters, and their // ranges of tunable values. The range of tunable values determines which values // that a hyperparameter tuning job can choose for the specified hyperparameter. 
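The monitor types added above (MonitorConfig, MonitorDataSource, MonitorInfo, MonitorSummary) pair with the new CreateMonitor and ListMonitors operations whose serializers appear earlier in this patch. The snippet below is a minimal usage sketch, not part of the generated diff: it assumes a standard aws-sdk-go-v2 client setup, uses a placeholder predictor ARN, and infers the CreateMonitor output field (MonitorArn) and the ListMonitors output fields (Monitors, NextToken) from the MonitorSummary documentation above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := forecast.NewFromConfig(cfg)

	// Attach a monitor to an existing auto predictor. MonitorName and
	// ResourceArn are the two required fields, matching
	// validateOpCreateMonitorInput later in this patch. The ARN is a
	// placeholder; substitute a real predictor.
	created, err := client.CreateMonitor(ctx, &forecast.CreateMonitorInput{
		MonitorName: aws.String("demo-monitor"),
		ResourceArn: aws.String("arn:aws:forecast:us-east-1:123456789012:predictor/demo"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", aws.ToString(created.MonitorArn))

	// Page through all monitors using the NextToken contract encoded by the
	// ListMonitors serializer above.
	var token *string
	for {
		page, err := client.ListMonitors(ctx, &forecast.ListMonitorsInput{
			MaxResults: aws.Int32(25),
			NextToken:  token,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range page.Monitors {
			fmt.Printf("%s\t%s\n", aws.ToString(m.MonitorName), aws.ToString(m.Status))
		}
		if page.NextToken == nil {
			break
		}
		token = page.NextToken
	}
}

Note that CreateMonitor only validates the two required fields client-side; monitoring can also be enabled at predictor creation time through the MonitorConfig member that this patch adds to CreateAutoPredictorInput.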
@@ -1144,6 +1299,34 @@ type PredictorBacktestExportJobSummary struct { noSmithyDocumentSerde } +// Metrics you can use as a baseline for comparison purposes. Use these metrics +// when you interpret monitoring results for an auto predictor. +type PredictorBaseline struct { + + // The initial accuracy metrics + // (https://docs.aws.amazon.com/forecast/latest/dg/metrics.html) for the predictor. + // Use these metrics as a baseline for comparison purposes as you use your + // predictor and the metrics change. + BaselineMetrics []BaselineMetric + + noSmithyDocumentSerde +} + +// Provides details about a predictor event, such as a retraining. +type PredictorEvent struct { + + // The timestamp for when the event occurred. + Datetime *time.Time + + // The type of event. For example, Retrain. A retraining event denotes the + // timepoint when a predictor was retrained. Any monitor results from before the + // Datetime are from the previous predictor. Any new metrics are for the newly + // retrained predictor. + Detail *string + + noSmithyDocumentSerde +} + // The algorithm used to perform a backtest and the status of those tests. type PredictorExecution struct { @@ -1172,6 +1355,48 @@ type PredictorExecutionDetails struct { noSmithyDocumentSerde } +// Describes the results of a monitor evaluation. +type PredictorMonitorEvaluation struct { + + // The status of the monitor evaluation. The state can be SUCCESS or FAILURE. + EvaluationState *string + + // The timestamp that indicates when the monitor evaluation was started. + EvaluationTime *time.Time + + // Information about any errors that may have occurred during the monitor + // evaluation. + Message *string + + // A list of metrics Forecast calculated when monitoring a predictor. You can + // compare the value for each metric in the list to the metric's value in the + // Baseline to see how your predictor's performance is changing. + MetricResults []MetricResult + + MonitorArn *string + + // The source of the data the monitor resource used during the evaluation. + MonitorDataSource *MonitorDataSource + + // The number of items considered during the evaluation. + NumItemsEvaluated *int64 + + // Provides details about a predictor event, such as a retraining. + PredictorEvent *PredictorEvent + + ResourceArn *string + + // The timestamp that indicates the end of the window that is used for monitor + // evaluation. + WindowEndDatetime *time.Time + + // The timestamp that indicates the start of the window that is used for monitor + // evaluation. + WindowStartDatetime *time.Time + + noSmithyDocumentSerde +} + // Provides a summary of the predictor properties that are used in the // ListPredictors operation. To get the complete set of properties, call the // DescribePredictor operation, and provide the listed PredictorArn. @@ -1287,21 +1512,26 @@ type Schema struct { } // An attribute of a schema, which defines a dataset field. A schema attribute is -// required for every field in a dataset. The Schema object contains an array of -// SchemaAttribute objects. +// required for every field in a dataset. The Schema +// (https://docs.aws.amazon.com/forecast/latest/dg/API_Schema.html) object contains +// an array of SchemaAttribute objects. type SchemaAttribute struct { // The name of the dataset field. AttributeName *string - // The data type of the field. + // The data type of the field. For a related time series dataset, other than date, + // item_id, and forecast dimensions attributes, all attributes should be of + // numerical type (integer/float). 
AttributeType AttributeType noSmithyDocumentSerde } // Provides statistics for each data field imported into to an Amazon Forecast -// dataset with the CreateDatasetImportJob operation. +// dataset with the CreateDatasetImportJob +// (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateDatasetImportJob.html) +// operation. type Statistics struct { // For a numeric field, the average value in the field. diff --git a/service/forecast/validators.go b/service/forecast/validators.go index 1ebc4e337dc..43a4c626a57 100644 --- a/service/forecast/validators.go +++ b/service/forecast/validators.go @@ -170,6 +170,26 @@ func (m *validateOpCreateForecast) HandleInitialize(ctx context.Context, in midd return next.HandleInitialize(ctx, in) } +type validateOpCreateMonitor struct { +} + +func (*validateOpCreateMonitor) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMonitor) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMonitorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMonitorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreatePredictorBacktestExportJob struct { } @@ -350,6 +370,26 @@ func (m *validateOpDeleteForecast) HandleInitialize(ctx context.Context, in midd return next.HandleInitialize(ctx, in) } +type validateOpDeleteMonitor struct { +} + +func (*validateOpDeleteMonitor) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteMonitor) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteMonitorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteMonitorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeletePredictorBacktestExportJob struct { } @@ -570,6 +610,26 @@ func (m *validateOpDescribeForecast) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpDescribeMonitor struct { +} + +func (*validateOpDescribeMonitor) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeMonitor) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeMonitorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeMonitorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDescribePredictorBacktestExportJob struct { } @@ -730,6 +790,46 @@ func (m *validateOpListForecasts) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpListMonitorEvaluations struct { +} + +func (*validateOpListMonitorEvaluations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListMonitorEvaluations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, 
next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListMonitorEvaluationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListMonitorEvaluationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListMonitors struct { +} + +func (*validateOpListMonitors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListMonitors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListMonitorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListMonitorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListPredictorBacktestExportJobs struct { } @@ -790,6 +890,26 @@ func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpResumeResource struct { +} + +func (*validateOpResumeResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpResumeResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ResumeResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpResumeResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpStopResource struct { } @@ -902,6 +1022,10 @@ func addOpCreateForecastValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateForecast{}, middleware.After) } +func addOpCreateMonitorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMonitor{}, middleware.After) +} + func addOpCreatePredictorBacktestExportJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreatePredictorBacktestExportJob{}, middleware.After) } @@ -938,6 +1062,10 @@ func addOpDeleteForecastValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteForecast{}, middleware.After) } +func addOpDeleteMonitorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteMonitor{}, middleware.After) +} + func addOpDeletePredictorBacktestExportJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeletePredictorBacktestExportJob{}, middleware.After) } @@ -982,6 +1110,10 @@ func addOpDescribeForecastValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeForecast{}, middleware.After) } +func addOpDescribeMonitorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeMonitor{}, middleware.After) +} + func addOpDescribePredictorBacktestExportJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribePredictorBacktestExportJob{}, middleware.After) } @@ 
-1014,6 +1146,14 @@ func addOpListForecastsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListForecasts{}, middleware.After) } +func addOpListMonitorEvaluationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListMonitorEvaluations{}, middleware.After) +} + +func addOpListMonitorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListMonitors{}, middleware.After) +} + func addOpListPredictorBacktestExportJobsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListPredictorBacktestExportJobs{}, middleware.After) } @@ -1026,6 +1166,10 @@ func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) } +func addOpResumeResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpResumeResource{}, middleware.After) +} + func addOpStopResourceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpStopResource{}, middleware.After) } @@ -1483,6 +1627,21 @@ func validateIntegerParameterRanges(v []types.IntegerParameterRange) error { } } +func validateMonitorConfig(v *types.MonitorConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MonitorConfig"} + if v.MonitorName == nil { + invalidParams.Add(smithy.NewErrParamRequired("MonitorName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateParameterRanges(v *types.ParameterRanges) error { if v == nil { return nil @@ -1621,6 +1780,11 @@ func validateOpCreateAutoPredictorInput(v *CreateAutoPredictorInput) error { invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) } } + if v.MonitorConfig != nil { + if err := validateMonitorConfig(v.MonitorConfig); err != nil { + invalidParams.AddNested("MonitorConfig", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -1833,6 +1997,29 @@ func validateOpCreateForecastInput(v *CreateForecastInput) error { } } +func validateOpCreateMonitorInput(v *CreateMonitorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMonitorInput"} + if v.MonitorName == nil { + invalidParams.Add(smithy.NewErrParamRequired("MonitorName")) + } + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags != nil { + if err := validateTags(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreatePredictorBacktestExportJobInput(v *CreatePredictorBacktestExportJobInput) error { if v == nil { return nil @@ -2015,6 +2202,21 @@ func validateOpDeleteForecastInput(v *DeleteForecastInput) error { } } +func validateOpDeleteMonitorInput(v *DeleteMonitorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteMonitorInput"} + if v.MonitorArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("MonitorArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeletePredictorBacktestExportJobInput(v *DeletePredictorBacktestExportJobInput) error { if v == nil { return nil @@ -2180,6 +2382,21 
@@ func validateOpDescribeForecastInput(v *DescribeForecastInput) error { } } +func validateOpDescribeMonitorInput(v *DescribeMonitorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeMonitorInput"} + if v.MonitorArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("MonitorArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribePredictorBacktestExportJobInput(v *DescribePredictorBacktestExportJobInput) error { if v == nil { return nil @@ -2310,6 +2527,43 @@ func validateOpListForecastsInput(v *ListForecastsInput) error { } } +func validateOpListMonitorEvaluationsInput(v *ListMonitorEvaluationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListMonitorEvaluationsInput"} + if v.MonitorArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("MonitorArn")) + } + if v.Filters != nil { + if err := validateFilters(v.Filters); err != nil { + invalidParams.AddNested("Filters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListMonitorsInput(v *ListMonitorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListMonitorsInput"} + if v.Filters != nil { + if err := validateFilters(v.Filters); err != nil { + invalidParams.AddNested("Filters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListPredictorBacktestExportJobsInput(v *ListPredictorBacktestExportJobsInput) error { if v == nil { return nil @@ -2359,6 +2613,21 @@ func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { } } +func validateOpResumeResourceInput(v *ResumeResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResumeResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpStopResourceInput(v *StopResourceInput) error { if v == nil { return nil diff --git a/service/personalize/api_op_CreateRecommender.go b/service/personalize/api_op_CreateRecommender.go index 153c9abbb54..049592ba85e 100644 --- a/service/personalize/api_op_CreateRecommender.go +++ b/service/personalize/api_op_CreateRecommender.go @@ -39,10 +39,14 @@ import ( // * CREATE PENDING > // CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// * DELETE PENDING > DELETE -// IN_PROGRESS +// * STOP PENDING > STOP +// IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE // -// To get the recommender status, call DescribeRecommender +// * DELETE +// PENDING > DELETE IN_PROGRESS +// +// To get the recommender status, call +// DescribeRecommender // (https://docs.aws.amazon.com/personalize/latest/dg/API_DescribeRecommender.html). // Wait until the status of the recommender is ACTIVE before asking the recommender // for recommendations. 
Related APIs diff --git a/service/personalize/api_op_DescribeRecommender.go b/service/personalize/api_op_DescribeRecommender.go index 032bab080bf..d9f0b41a224 100644 --- a/service/personalize/api_op_DescribeRecommender.go +++ b/service/personalize/api_op_DescribeRecommender.go @@ -17,11 +17,15 @@ import ( // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE // -or- CREATE FAILED // +// * STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING +// > START IN_PROGRESS > ACTIVE +// // * DELETE PENDING > DELETE IN_PROGRESS // -// When the status is -// CREATE FAILED, the response includes the failureReason key, which describes why. -// For more information on recommenders, see CreateRecommender +// When the +// status is CREATE FAILED, the response includes the failureReason key, which +// describes why. The modelMetrics key is null when the recommender is being +// created or deleted. For more information on recommenders, see CreateRecommender // (https://docs.aws.amazon.com/personalize/latest/dg/API_CreateRecommender.html). func (c *Client) DescribeRecommender(ctx context.Context, params *DescribeRecommenderInput, optFns ...func(*Options)) (*DescribeRecommenderOutput, error) { if params == nil { diff --git a/service/personalize/api_op_GetSolutionMetrics.go b/service/personalize/api_op_GetSolutionMetrics.go index 01d809280ba..36c3084ffa5 100644 --- a/service/personalize/api_op_GetSolutionMetrics.go +++ b/service/personalize/api_op_GetSolutionMetrics.go @@ -38,7 +38,9 @@ type GetSolutionMetricsInput struct { type GetSolutionMetricsOutput struct { - // The metrics for the solution version. + // The metrics for the solution version. For more information, see Evaluating a + // solution version with metrics + // (https://docs.aws.amazon.com/personalize/latest/dg/working-with-training-metrics.html). Metrics map[string]float64 // The same solution version ARN as specified in the request. diff --git a/service/personalize/deserializers.go b/service/personalize/deserializers.go index 69a20b4de44..eb7ca6924df 100644 --- a/service/personalize/deserializers.go +++ b/service/personalize/deserializers.go @@ -1253,6 +1253,9 @@ func awsAwsjson11_deserializeOpErrorCreateRecommender(response *smithyhttp.Respo case strings.EqualFold("ResourceAlreadyExistsException", errorCode): return awsAwsjson11_deserializeErrorResourceAlreadyExistsException(response, errorBody) + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson11_deserializeErrorResourceInUseException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) @@ -12117,6 +12120,11 @@ func awsAwsjson11_deserializeDocumentRecommender(v **types.Recommender, value in return err } + case "modelMetrics": + if err := awsAwsjson11_deserializeDocumentMetrics(&sv.ModelMetrics, value); err != nil { + return err + } + case "name": if value != nil { jtv, ok := value.(string) diff --git a/service/personalize/types/types.go b/service/personalize/types/types.go index 11a0ce5e4b1..fb2edbe3115 100644 --- a/service/personalize/types/types.go +++ b/service/personalize/types/types.go @@ -1320,6 +1320,11 @@ type Recommender struct { // Provides a summary of the latest updates to the recommender. LatestRecommenderUpdate *RecommenderUpdateSummary + // Provides evaluation metrics that help you determine the performance of a + // recommender. 
For more information, see Evaluating a recommender + // (https://docs.aws.amazon.com/personalize/latest/dg/evaluating-recommenders.html). + ModelMetrics map[string]float64 + // The name of the recommender. Name *string @@ -1339,7 +1344,10 @@ type Recommender struct { // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // // * - // DELETE PENDING > DELETE IN_PROGRESS + // STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > + // ACTIVE + // + // * DELETE PENDING > DELETE IN_PROGRESS Status *string noSmithyDocumentSerde @@ -1394,7 +1402,10 @@ type RecommenderSummary struct { // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // // * - // DELETE PENDING > DELETE IN_PROGRESS + // STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > + // ACTIVE + // + // * DELETE PENDING > DELETE IN_PROGRESS Status *string noSmithyDocumentSerde @@ -1424,6 +1435,9 @@ type RecommenderUpdateSummary struct { // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE // FAILED // + // * STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START + // IN_PROGRESS > ACTIVE + // // * DELETE PENDING > DELETE IN_PROGRESS Status *string diff --git a/service/proton/internal/endpoints/endpoints.go b/service/proton/internal/endpoints/endpoints.go index fbf78457649..69d5672b6aa 100644 --- a/service/proton/internal/endpoints/endpoints.go +++ b/service/proton/internal/endpoints/endpoints.go @@ -134,6 +134,23 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-cn", diff --git a/service/wellarchitected/internal/endpoints/endpoints.go b/service/wellarchitected/internal/endpoints/endpoints.go index df152d88285..53371017b05 100644 --- a/service/wellarchitected/internal/endpoints/endpoints.go +++ b/service/wellarchitected/internal/endpoints/endpoints.go @@ -134,6 +134,62 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: 
endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-cn",
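On the Personalize side, the new modelMetrics member deserialized above surfaces a recommender's evaluation metrics directly on DescribeRecommender. The following is a short sketch of how a caller might read it, assuming the usual aws-sdk-go-v2 client setup and a placeholder recommender ARN; the field types (Recommender.Status *string, ModelMetrics map[string]float64) come from the types.go changes in this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/personalize"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := personalize.NewFromConfig(cfg)

	// Placeholder ARN; substitute a real recommender.
	out, err := client.DescribeRecommender(ctx, &personalize.DescribeRecommenderInput{
		RecommenderArn: aws.String("arn:aws:personalize:us-east-1:123456789012:recommender/demo"),
	})
	if err != nil {
		log.Fatal(err)
	}

	rec := out.Recommender
	fmt.Println("status:", aws.ToString(rec.Status))

	// Per the doc change above, modelMetrics is null while the recommender is
	// being created or deleted; ranging over a nil map is a no-op in Go, so no
	// explicit guard is needed.
	for name, value := range rec.ModelMetrics {
		fmt.Printf("%s = %.4f\n", name, value)
	}
}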